text
stringlengths 8
1.72M
| id
stringlengths 22
143
| metadata
dict | __index_level_0__
int64 0
104
|
---|---|---|---|
# system:
Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous.
# user:
{{prev_question}}
# assistant:
{{prev_answer}}
# function:
## name:
{{name}}
## content:
{{result}}
# user:
{{question}}
| promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/prompt_with_function.jinja2/0 | {
"file_path": "promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/prompt_with_function.jinja2",
"repo_id": "promptflow",
"token_count": 82
} | 34 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow._sdk._configuration import Configuration
# This logic is copied from: https://github.com/microsoft/knack/blob/dev/knack/help.py
# Will print privacy message and welcome when user run `pf` command.
PRIVACY_STATEMENT = """
Welcome to prompt flow!
---------------------
Use `pf -h` to see available commands or go to https://aka.ms/pf-cli.
Telemetry
---------
The prompt flow CLI collects usage data in order to improve your experience.
The data is anonymous and does not include commandline argument values.
The data is collected by Microsoft.
You can change your telemetry settings with `pf config`.
"""
WELCOME_MESSAGE = r"""
____ _ __ _
| _ \ _ __ ___ _ __ ___ _ __ | |_ / _| | _____ __
| |_) | '__/ _ \| '_ ` _ \| '_ \| __| | |_| |/ _ \ \ /\ / /
| __/| | | (_) | | | | | | |_) | |_ | _| | (_) \ V V /
|_| |_| \___/|_| |_| |_| .__/ \__| |_| |_|\___/ \_/\_/
|_|
Welcome to the cool prompt flow CLI!
Use `pf --version` to display the current version.
Here are the base commands:
"""
def show_privacy_statement():
    """Print the telemetry/privacy statement the first time the `pf` CLI runs."""
    config = Configuration.get_instance()
    # "first_run" is persisted in config, so the statement is shown only once.
    if not config.get_config("first_run"):
        print(PRIVACY_STATEMENT)
        config.set_config("first_run", True)
def show_welcome_message():
    """Print the ASCII-art welcome banner shown by the `pf` CLI."""
    print(WELCOME_MESSAGE)
| promptflow/src/promptflow/promptflow/_cli/_pf/help.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf/help.py",
"repo_id": "promptflow",
"token_count": 563
} | 35 |
{
"package": {},
"code": {
{% for key, prompt_obj in prompt_params.items() %}
"{{ key }}": {
"type": "prompt",
"inputs": {
{% for input_name, value in prompt_obj.get("inputs", {}).items() %}
"{{ input_name }}": {
"type": [
{% for typ in value["type"] %}
"{{ typ.value }}"
{% endfor %}
]
}{{ "," if not loop.last else "" }}
{% endfor %}
},
"source": "{{ prompt_obj.source }}"
},
{% endfor %}
"{{ tool_file }}": {
"type": "python",
"inputs": {
{% for arg, typ in tool_meta_args.items() %}
"{{ arg }}": {
"type": [
"{{ typ }}"
]
},
{% endfor %}
"connection": {
"type": [
"CustomConnection"
]
}
},
"function": "{{ tool_function }}",
"source": "{{ tool_file }}"
}
}
}
| promptflow/src/promptflow/promptflow/_cli/data/entry_flow/flow.tools.json.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/entry_flow/flow.tools.json.jinja2",
"repo_id": "promptflow",
"token_count": 779
} | 36 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${echo_my_prompt.output}
nodes:
- name: hello_prompt
type: prompt
source:
type: code
path: hello.jinja2
inputs:
text: ${inputs.text}
- name: echo_my_prompt
type: python
source:
type: code
path: hello.py
inputs:
input1: ${hello_prompt.output}
environment:
python_requirements_txt: requirements.txt
| promptflow/src/promptflow/promptflow/_cli/data/standard_flow/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/standard_flow/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 205
} | 37 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from contextvars import ContextVar
from typing import Type, TypeVar
T = TypeVar("T")


class ThreadLocalSingleton:
    """Base class for a per-context singleton.

    Context variables are isolated per thread and per asyncio task, so each
    execution context sees (at most) one active instance of a subclass.
    See reference: https://docs.python.org/3/library/contextvars.html#contextvars.ContextVar
    """

    CONTEXT_VAR_NAME = "ThreadLocalSingleton"
    context_var = ContextVar(CONTEXT_VAR_NAME, default=None)

    @classmethod
    def active_instance(cls: Type[T]) -> T:
        """Return the instance active in the current context, or None."""
        return cls.context_var.get()

    @classmethod
    def active(cls) -> bool:
        """Whether an instance is currently active in this context."""
        return cls.active_instance() is not None

    def _activate_in_context(self, force=False):
        """Make this instance the active one for the current context.

        Raises NotImplementedError when a different instance is already active
        and ``force`` is False.
        """
        current = self.active_instance()
        if current is None or current is self or force:
            self.context_var.set(self)
            return
        raise NotImplementedError(f"Cannot set active since there is another active instance: {current}")

    def _deactivate_in_context(self):
        """Clear the active instance for the current context."""
        self.context_var.set(None)
| promptflow/src/promptflow/promptflow/_core/thread_local_singleton.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/thread_local_singleton.py",
"repo_id": "promptflow",
"token_count": 370
} | 38 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import time
from functools import partial, wraps
from typing import Tuple, Union
from sqlalchemy.exc import OperationalError
def retry(exception_to_check: Union[Type[Exception], Tuple[Type[Exception], ...]], tries=4, delay=3, backoff=2, logger=None):
    """
    From https://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    Retry calling the decorated function using an exponential backoff.
    http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry

    :param exception_to_check: the exception to check. may be a tuple of
        exceptions to check
    :type exception_to_check: Exception or tuple
    :param tries: number of times to try (not retry) before giving up
    :type tries: int
    :param delay: initial delay between retries in seconds
    :type delay: int
    :param backoff: backoff multiplier e.g. value of 2 will double the delay
        each retry
    :type backoff: int
    :param logger: log the retry action if specified
    :type logger: logging.Logger
    """

    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            retry_times, delay_seconds = tries, delay
            # All attempts except the last are made inside the loop, so the
            # final attempt (below) can propagate the exception to the caller.
            while retry_times > 1:
                try:
                    if logger:
                        logger.info("Running %s, %d more tries to go.", str(f), retry_times)
                    return f(*args, **kwargs)
                except exception_to_check:
                    # Bug fix: log BEFORE sleeping and with the delay actually
                    # used. Previously the warning was emitted after the sleep
                    # and reported the next (already multiplied) delay.
                    if logger:
                        logger.warning("%s, Retrying in %d seconds...", str(exception_to_check), delay_seconds)
                    time.sleep(delay_seconds)
                    retry_times -= 1
                    delay_seconds *= backoff
            # Last attempt: no try/except — let the exception propagate.
            return f(*args, **kwargs)

        return f_retry  # true decorator

    return deco_retry
# Retry decorator pre-configured for transient SQLite errors surfaced by
# sqlalchemy as OperationalError (e.g. "database is locked"): 3 tries with a
# fixed 0.5s delay (backoff=1 keeps the delay constant).
sqlite_retry = partial(retry, exception_to_check=OperationalError, tries=3, delay=0.5, backoff=1)()
| promptflow/src/promptflow/promptflow/_sdk/_orm/retry.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_orm/retry.py",
"repo_id": "promptflow",
"token_count": 868
} | 39 |
{
"swagger": "2.0",
"basePath": "/v1.0",
"paths": {
"/Connections/": {
"get": {
"responses": {
"403": {
"description": "This service is available for local user only, please specify X-Remote-User in headers."
},
"200": {
"description": "Success",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/Connection"
}
}
}
},
        "description": "List all connections",
"operationId": "get_connection_list",
"parameters": [
{
"name": "working_directory",
"in": "query",
"type": "string"
}
],
"tags": [
"Connections"
]
}
},
"/Connections/specs": {
"get": {
"responses": {
"200": {
"description": "List connection spec",
"schema": {
"$ref": "#/definitions/ConnectionSpec"
}
}
},
"description": "List connection spec",
"operationId": "get_connection_specs",
"tags": [
"Connections"
]
}
},
"/Connections/{name}": {
"parameters": [
{
"in": "path",
"description": "The connection name.",
"name": "name",
"required": true,
"type": "string"
}
],
"put": {
"responses": {
"403": {
"description": "This service is available for local user only, please specify X-Remote-User in headers."
},
"200": {
"description": "Connection details",
"schema": {
"$ref": "#/definitions/ConnectionDict"
}
}
},
"description": "Update connection",
"operationId": "put_connection",
"parameters": [
{
"name": "payload",
"required": true,
"in": "body",
"schema": {
"$ref": "#/definitions/ConnectionDict"
}
}
],
"tags": [
"Connections"
]
},
"get": {
"responses": {
"403": {
"description": "This service is available for local user only, please specify X-Remote-User in headers."
},
"200": {
"description": "Connection details",
"schema": {
"$ref": "#/definitions/ConnectionDict"
}
}
},
"description": "Get connection",
"operationId": "get_connection",
"parameters": [
{
"name": "working_directory",
"in": "query",
"type": "string"
}
],
"tags": [
"Connections"
]
},
"delete": {
"responses": {
"403": {
"description": "This service is available for local user only, please specify X-Remote-User in headers."
}
},
"description": "Delete connection",
"operationId": "delete_connection",
"tags": [
"Connections"
]
},
"post": {
"responses": {
"403": {
"description": "This service is available for local user only, please specify X-Remote-User in headers."
},
"200": {
"description": "Connection details",
"schema": {
"$ref": "#/definitions/ConnectionDict"
}
}
},
"description": "Create connection",
"operationId": "post_connection",
"parameters": [
{
"name": "payload",
"required": true,
"in": "body",
"schema": {
"$ref": "#/definitions/ConnectionDict"
}
}
],
"tags": [
"Connections"
]
}
},
"/Connections/{name}/listsecrets": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"403": {
"description": "This service is available for local user only, please specify X-Remote-User in headers."
},
"200": {
"description": "Connection details with secret",
"schema": {
"$ref": "#/definitions/ConnectionDict"
}
}
},
"description": "Get connection with secret",
"operationId": "get_connection_with_secret",
"parameters": [
{
"name": "working_directory",
"in": "query",
"type": "string"
}
],
"tags": [
"Connections"
]
}
},
"/Runs/": {
"get": {
"responses": {
"200": {
"description": "Runs",
"schema": {
"$ref": "#/definitions/RunList"
}
}
},
"description": "List all runs",
"operationId": "get_run_list",
"tags": [
"Runs"
]
}
},
"/Runs/submit": {
"post": {
"responses": {
"200": {
"description": "Submit run info",
"schema": {
"$ref": "#/definitions/RunDict"
}
}
},
"description": "Submit run",
"operationId": "post_run_submit",
"parameters": [
{
"name": "payload",
"required": true,
"in": "body",
"schema": {
"$ref": "#/definitions/RunDict"
}
}
],
"tags": [
"Runs"
]
}
},
"/Runs/{name}": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"put": {
"responses": {
"200": {
"description": "Update run info",
"schema": {
"$ref": "#/definitions/RunDict"
}
}
},
"description": "Update run",
"operationId": "put_run",
"parameters": [
{
"name": "display_name",
"in": "formData",
"type": "string"
},
{
"name": "description",
"in": "formData",
"type": "string"
},
{
"name": "tags",
"in": "formData",
"type": "string"
}
],
"consumes": [
"application/x-www-form-urlencoded",
"multipart/form-data"
],
"tags": [
"Runs"
]
},
"get": {
"responses": {
"200": {
"description": "Get run info",
"schema": {
"$ref": "#/definitions/RunDict"
}
}
},
"description": "Get run",
"operationId": "get_run",
"tags": [
"Runs"
]
}
},
"/Runs/{name}/archive": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"200": {
"description": "Archived run",
"schema": {
"$ref": "#/definitions/RunDict"
}
}
},
"description": "Archive run",
"operationId": "get_archive_run",
"tags": [
"Runs"
]
}
},
"/Runs/{name}/childRuns": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"200": {
"description": "Child runs",
"schema": {
"$ref": "#/definitions/RunList"
}
}
},
"description": "Get child runs",
"operationId": "get_flow_child_runs",
"tags": [
"Runs"
]
}
},
"/Runs/{name}/logContent": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"200": {
"description": "Log content",
"schema": {
"type": "string"
}
}
},
"description": "Get run log content",
"operationId": "get_log_content",
"tags": [
"Runs"
]
}
},
"/Runs/{name}/metaData": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"200": {
"description": "Run metadata",
"schema": {
"$ref": "#/definitions/RunDict"
}
}
},
"description": "Get metadata of run",
"operationId": "get_meta_data",
"tags": [
"Runs"
]
}
},
"/Runs/{name}/metrics": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"200": {
"description": "Run metrics",
"schema": {
"$ref": "#/definitions/RunDict"
}
}
},
"description": "Get run metrics",
"operationId": "get_metrics",
"tags": [
"Runs"
]
}
},
"/Runs/{name}/nodeRuns/{node_name}": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
},
{
"name": "node_name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"200": {
"description": "Node runs",
"schema": {
"$ref": "#/definitions/RunList"
}
}
},
"description": "Get node runs info",
"operationId": "get_flow_node_runs",
"tags": [
"Runs"
]
}
},
"/Runs/{name}/restore": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"200": {
"description": "Restored run",
"schema": {
"$ref": "#/definitions/RunDict"
}
}
},
"description": "Restore run",
"operationId": "get_restore_run",
"tags": [
"Runs"
]
}
},
"/Runs/{name}/visualize": {
"parameters": [
{
"name": "name",
"in": "path",
"required": true,
"type": "string"
}
],
"get": {
"responses": {
"200": {
"description": "Visualize run",
"schema": {
"type": "string"
}
}
},
"description": "Visualize run",
"operationId": "get_visualize_run",
"produces": [
"text/html"
],
"tags": [
"Runs"
]
}
},
"/Telemetries/": {
"post": {
"responses": {
"403": {
"description": "Telemetry is disabled or X-Remote-User is not set.",
"headers": {
"x-ms-promptflow-request-id": {
"type": "string"
}
}
},
"400": {
"description": "Input payload validation failed",
"headers": {
"x-ms-promptflow-request-id": {
"type": "string"
}
}
},
"200": {
"description": "Create telemetry record",
"headers": {
"x-ms-promptflow-request-id": {
"type": "string"
}
}
}
},
"description": "Create telemetry record",
"operationId": "post_telemetry",
"parameters": [
{
"name": "payload",
"required": true,
"in": "body",
"schema": {
"$ref": "#/definitions/Telemetry"
}
}
],
"tags": [
"Telemetries"
]
}
}
},
"info": {
"title": "Prompt Flow Service",
"version": "1.0"
},
"produces": [
"application/json"
],
"consumes": [
"application/json"
],
"tags": [
{
"name": "Connections",
"description": "Connections Management"
},
{
"name": "Runs",
"description": "Runs Management"
},
{
"name": "Telemetries",
"description": "Telemetry Management"
}
],
"definitions": {
"Connection": {
"properties": {
"name": {
"type": "string"
},
"type": {
"type": "string"
},
"module": {
"type": "string"
},
"expiry_time": {
"type": "string"
},
"created_date": {
"type": "string"
},
"last_modified_date": {
"type": "string"
}
},
"type": "object"
},
"ConnectionDict": {
"additionalProperties": true,
"type": "object"
},
"ConnectionSpec": {
"properties": {
"connection_type": {
"type": "string"
},
"config_spec": {
"type": "array",
"items": {
"$ref": "#/definitions/ConnectionConfigSpec"
}
}
},
"type": "object"
},
"ConnectionConfigSpec": {
"properties": {
"name": {
"type": "string"
},
"optional": {
"type": "boolean"
},
"default": {
"type": "string"
}
},
"type": "object"
},
"RunList": {
"type": "array",
"items": {
"$ref": "#/definitions/RunDict"
}
},
"RunDict": {
"additionalProperties": true,
"type": "object"
},
"Telemetry": {
"required": [
"eventType",
"timestamp"
],
"properties": {
"eventType": {
"type": "string",
"description": "The event type of the telemetry.",
"example": "Start",
"enum": [
"Start",
"End"
]
},
"timestamp": {
"type": "string",
"format": "date-time",
"description": "The timestamp of the telemetry."
},
"firstCall": {
"type": "boolean",
"description": "Whether current activity is the first activity in the call chain.",
"default": true
},
"metadata": {
"$ref": "#/definitions/Metadata"
}
},
"type": "object"
},
"Metadata": {
"required": [
"activityName",
"activityType"
],
"properties": {
"activityName": {
"type": "string",
"description": "The name of the activity.",
"example": "pf.flow.test",
"enum": [
"pf.flow.test",
"pf.flow.node_test",
"pf.flow._generate_tools_meta"
]
},
"activityType": {
"type": "string",
"description": "The type of the activity."
},
"completionStatus": {
"type": "string",
"description": "The completion status of the activity.",
"example": "Success",
"enum": [
"Success",
"Failure"
]
},
"durationMs": {
"type": "integer",
"description": "The duration of the activity in milliseconds."
},
"errorCategory": {
"type": "string",
"description": "The error category of the activity."
},
"errorType": {
"type": "string",
"description": "The error type of the activity."
},
"errorTarget": {
"type": "string",
"description": "The error target of the activity."
},
"errorMessage": {
"type": "string",
"description": "The error message of the activity."
},
"errorDetails": {
"type": "string",
"description": "The error details of the activity."
}
},
"type": "object"
}
},
"responses": {
"ParseError": {
"description": "When a mask can't be parsed"
},
"MaskError": {
"description": "When any error occurs on mask"
},
"Exception": {
"description": "When any error occurs on the server, return a formatted error message"
}
}
}
| promptflow/src/promptflow/promptflow/_sdk/_service/swagger.json/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_service/swagger.json",
"repo_id": "promptflow",
"token_count": 16309
} | 40 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
class FlowDataCollector:
    """FlowDataCollector is used to collect flow data via MDC for monitoring.

    MDC (azureml.ai.monitoring) is an optional dependency: when the package is
    missing or fails to initialize, collection is disabled and every call to
    :meth:`collect_flow_data` becomes a no-op.
    """

    def __init__(self, logger):
        self.logger = logger
        # True only when the MDC collectors were created successfully.
        self._init_success = self._init_data_collector()
        logger.info(f"Mdc init status: {self._init_success}")

    def _init_data_collector(self) -> bool:
        """Initialize the MDC input/output collectors; return True on success."""
        self.logger.info("Init mdc...")
        try:
            from azureml.ai.monitoring import Collector

            self.inputs_collector = Collector(name="model_inputs")
            self.outputs_collector = Collector(name="model_outputs")
            return True
        except ImportError as e:
            # Fix: Logger.warn is a deprecated alias since Python 3.3 — use warning.
            self.logger.warning(f"Load mdc related module failed: {e}")
            return False
        except Exception as e:
            self.logger.warning(f"Init mdc failed: {e}")
            return False

    def collect_flow_data(self, input: dict, output: dict, req_id: str = None, client_req_id: str = None):
        """collect flow data via MDC for monitoring."""
        if not self._init_success:
            return
        try:
            import pandas as pd
            from azureml.ai.monitoring.context import BasicCorrelationContext

            # build context
            # NOTE(review): client_req_id is currently unused — confirm whether it
            # should also be attached to the correlation context.
            ctx = BasicCorrelationContext(id=req_id)
            # collect inputs (each value wrapped in a one-row column)
            input_df = pd.DataFrame({k: [v] for k, v in input.items()})
            self.inputs_collector.collect(input_df, ctx)
            # collect outputs, passing the same correlation_context so inputs and
            # outputs data can be correlated later
            output_df = pd.DataFrame({k: [v] for k, v in output.items()})
            self.outputs_collector.collect(output_df, ctx)
        except ImportError as e:
            self.logger.warning(f"Load mdc related module failed: {e}")
        except Exception as e:
            self.logger.warning(f"Collect flow data failed: {e}")
| promptflow/src/promptflow/promptflow/_sdk/_serving/monitor/data_collector.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/monitor/data_collector.py",
"repo_id": "promptflow",
"token_count": 912
} | 41 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
import os
import platform
import sys
from opencensus.ext.azure.log_exporter import AzureEventHandler
from promptflow._sdk._configuration import Configuration
# promptflow-sdk in east us
INSTRUMENTATION_KEY = "8b52b368-4c91-4226-b7f7-be52822f0509"
# cspell:ignore overriden
def get_appinsights_log_handler():
    """
    Enable the OpenCensus logging handler for specified logger and instrumentation key to send info to AppInsights.
    """
    from promptflow._sdk._telemetry.telemetry import is_telemetry_enabled

    try:
        custom_properties = {
            "python_version": platform.python_version(),
            "installation_id": Configuration.get_instance().get_or_set_installation_id(),
        }
        return PromptFlowSDKLogHandler(
            connection_string=f"InstrumentationKey={INSTRUMENTATION_KEY}",
            custom_properties=custom_properties,
            enable_telemetry=is_telemetry_enabled(),
        )
    except Exception:  # pylint: disable=broad-except
        # ignore any exceptions, telemetry collection errors shouldn't block an operation
        return logging.NullHandler()
def get_scrubbed_cloud_role():
    """Create cloud role for telemetry, will scrub user script name and only leave extension."""
    default = "Unknown Application"
    # Well-known launcher/script names that are safe to report verbatim.
    known_scripts = {
        "pfs",
        "pfutil.py",
        "pf",
        "pfazure",
        "pf.exe",
        "pfazure.exe",
        "app.py",
        "python -m unittest",
        "pytest",
        "gunicorn",
        "ipykernel_launcher.py",
        "jupyter-notebook",
        "jupyter-lab",
        "python",
        "_jb_pytest_runner.py",
        default,
    }
    try:
        script = os.path.basename(sys.argv[0]) or default
        if script in known_scripts:
            return script
        # Unknown script name: keep only the extension so no user data leaks.
        return "***" + os.path.splitext(script)[1]
    except Exception:
        # fallback to default cloud role if failed to scrub
        return default
# cspell:ignore AzureMLSDKLogHandler
class PromptFlowSDKLogHandler(AzureEventHandler):
    """Customized AzureLogHandler for PromptFlow SDK.

    Sends telemetry events to Application Insights while honoring the user's
    telemetry opt-out and scrubbing host-identifying data (cloud role,
    role instance, device id) before upload.
    """

    def __init__(self, custom_properties, enable_telemetry, **kwargs):
        super().__init__(**kwargs)
        # disable AzureEventHandler's logging to avoid warning affect user experience
        self.disable_telemetry_logger()
        self._is_telemetry_enabled = enable_telemetry
        # Extra dimensions (python version, installation id, ...) merged into every record.
        self._custom_dimensions = custom_properties

    def _check_stats_collection(self):
        # skip checking stats collection since it's time-consuming
        # according to doc: https://learn.microsoft.com/en-us/azure/azure-monitor/app/statsbeat
        # it doesn't affect customers' overall monitoring volume
        return False

    def emit(self, record):
        # skip logging if telemetry is disabled
        if not self._is_telemetry_enabled:
            return
        try:
            # Enqueue for the background exporter; never block the caller.
            self._queue.put(record, block=False)
            # log the record immediately if it is an error
            if record.exc_info and not all(item is None for item in record.exc_info):
                self._queue.flush()
        except Exception:  # pylint: disable=broad-except
            # ignore any exceptions, telemetry collection errors shouldn't block an operation
            return

    def log_record_to_envelope(self, record):
        from promptflow._utils.utils import is_in_ci_pipeline

        # skip logging if telemetry is disabled
        if not self._is_telemetry_enabled:
            return
        custom_dimensions = {
            "level": record.levelname,
            # add to distinguish if the log is from ci pipeline
            "from_ci": is_in_ci_pipeline(),
        }
        custom_dimensions.update(self._custom_dimensions)
        # Merge into any dimensions the caller already attached to the record.
        if hasattr(record, "custom_dimensions") and isinstance(record.custom_dimensions, dict):
            record.custom_dimensions.update(custom_dimensions)
        else:
            record.custom_dimensions = custom_dimensions
        envelope = super().log_record_to_envelope(record=record)
        # scrub data before sending to appinsights
        role = get_scrubbed_cloud_role()
        envelope.tags["ai.cloud.role"] = role
        # Drop host/device identifiers so no machine-identifying data is uploaded.
        envelope.tags.pop("ai.cloud.roleInstance", None)
        envelope.tags.pop("ai.device.id", None)
        return envelope

    @classmethod
    def disable_telemetry_logger(cls):
        """Disable AzureEventHandler's logging to avoid warning affect user experience"""
        from opencensus.ext.azure.common.processor import logger as processor_logger
        from opencensus.ext.azure.common.storage import logger as storage_logger
        from opencensus.ext.azure.common.transport import logger as transport_logger

        processor_logger.setLevel(logging.CRITICAL)
        transport_logger.setLevel(logging.CRITICAL)
        storage_logger.setLevel(logging.CRITICAL)
| promptflow/src/promptflow/promptflow/_sdk/_telemetry/logging_handler.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_telemetry/logging_handler.py",
"repo_id": "promptflow",
"token_count": 2042
} | 42 |
#!/bin/bash
echo "$(date -uIns) - promptflow-serve/finish $@"
echo "$(date -uIns) - Stopped all Gunicorn processes" | promptflow/src/promptflow/promptflow/_sdk/data/docker_csharp/runit/promptflow-serve/finish.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/docker_csharp/runit/promptflow-serve/finish.jinja2",
"repo_id": "promptflow",
"token_count": 45
} | 43 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import abc
import json
from os import PathLike
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
from marshmallow import Schema
from promptflow._constants import LANGUAGE_KEY, FlowLanguage
from promptflow._sdk._constants import (
BASE_PATH_CONTEXT_KEY,
DAG_FILE_NAME,
DEFAULT_ENCODING,
FLOW_TOOLS_JSON,
PROMPT_FLOW_DIR_NAME,
)
from promptflow._sdk.entities._connection import _Connection
from promptflow._sdk.entities._validation import SchemaValidatableMixin
from promptflow._utils.flow_utils import resolve_flow_path
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import load_yaml, load_yaml_string
from promptflow.exceptions import ErrorTarget, UserErrorException
logger = get_cli_sdk_logger()
class FlowContext:
    """Flow context entity. The settings on this context will be applied to the flow when executing.

    :param connections: Connections for the flow.
    :type connections: Optional[Dict[str, Dict]]
    :param variant: Variant of the flow.
    :type variant: Optional[str]
    :param overrides: Overrides of the flow.
    :type overrides: Optional[Dict[str, Dict]]
    :param streaming: Whether the flow's output needs to be returned in streaming mode.
    :type streaming: Optional[bool]
    """

    def __init__(
        self,
        *,
        connections=None,
        variant=None,
        overrides=None,
        streaming=None,
    ):
        # _connection_objs maps generated connection names -> connection objects
        # collected by _resolve_connections.
        self.connections, self._connection_objs = connections or {}, {}
        self.variant = variant
        self.overrides = overrides or {}
        self.streaming = streaming
        # TODO: introduce connection provider support

    def _resolve_connections(self):
        # resolve connections and create placeholder for connection objects
        for _, v in self.connections.items():
            if isinstance(v, dict):
                for k, conn in v.items():
                    if isinstance(conn, _Connection):
                        # Replace the object with a deterministic generated name and
                        # remember the object so it can be created later.
                        name = self._get_connection_obj_name(conn)
                        v[k] = name
                        self._connection_objs[name] = conn

    @classmethod
    def _get_connection_obj_name(cls, connection: _Connection):
        # create a unique connection name for connection obj
        # will generate same name if connection has same content
        connection_dict = connection._to_dict()
        connection_name = f"connection_{hash(json.dumps(connection_dict, sort_keys=True))}"
        return connection_name

    def _to_dict(self):
        # Plain-dict view used by __eq__ and __hash__.
        return {
            "connections": self.connections,
            "variant": self.variant,
            "overrides": self.overrides,
            "streaming": self.streaming,
        }

    def __eq__(self, other):
        if isinstance(other, FlowContext):
            return self._to_dict() == other._to_dict()
        return False

    def __hash__(self):
        # NOTE(review): hashing mutates self.connections via _resolve_connections
        # (connection objects are swapped for generated names) — confirm intended.
        self._resolve_connections()
        return hash(json.dumps(self._to_dict(), sort_keys=True))
class FlowBase(abc.ABC):
    """Base class for flow entities, carrying the flow context and content hash."""

    def __init__(self, **kwargs):
        # Per-flow execution context (connections/variant/overrides/streaming).
        self._context = FlowContext()
        # Hash of the raw flow file content, used for equality checks; may be None.
        self._content_hash = kwargs.pop("content_hash", None)
        super().__init__(**kwargs)

    @property
    def context(self) -> FlowContext:
        """Execution context applied to the flow when executing."""
        return self._context

    @context.setter
    def context(self, val):
        if not isinstance(val, FlowContext):
            # Bug fix: this message was a plain string, so {type(val)} was
            # never interpolated; it must be an f-string.
            raise UserErrorException(f"context must be a FlowContext object, got {type(val)} instead.")
        self._context = val

    @property
    @abc.abstractmethod
    def language(self) -> str:
        """Language of the flow."""

    @classmethod
    # pylint: disable=unused-argument
    def _resolve_cls_and_type(cls, data, params_override):
        """Resolve the class to use for deserializing the data. Return current class if no override is provided.

        :param data: Data to deserialize.
        :type data: dict
        :param params_override: Parameters to override, defaults to None
        :type params_override: typing.Optional[list]
        :return: Class to use for deserializing the data & its "type". Type will be None if no override is provided.
        :rtype: tuple[class, typing.Optional[str]]
        """
        return cls, "flow"
class Flow(FlowBase):
    """This class is used to represent a flow."""

    def __init__(
        self,
        code: Union[str, PathLike],
        dag: dict,
        **kwargs,
    ):
        # Directory containing the flow assets.
        self._code = Path(code)
        # Optional explicit flow file path; when absent, <code>/DAG_FILE_NAME is used.
        path = kwargs.pop("path", None)
        self._path = Path(path) if path else None
        # Node variant selection; defaults to an empty dict.
        self.variant = kwargs.pop("variant", None) or {}
        # Parsed DAG content (dict loaded from the flow YAML).
        self.dag = dag
        super().__init__(**kwargs)

    @property
    def code(self) -> Path:
        """Directory containing the flow assets."""
        return self._code

    @code.setter
    def code(self, value: Union[str, PathLike, Path]):
        # NOTE(review): unlike __init__, the setter does not wrap the value in
        # Path — confirm callers always pass a Path.
        self._code = value

    @property
    def path(self) -> Path:
        """Path of the flow DAG file; raises if it is not a file."""
        flow_file = self._path or self.code / DAG_FILE_NAME
        if not flow_file.is_file():
            raise UserErrorException(
                "The directory does not contain a valid flow.",
                target=ErrorTarget.CONTROL_PLANE_SDK,
            )
        return flow_file

    @property
    def language(self) -> str:
        """Language of the flow; defaults to Python when not declared in the DAG."""
        return self.dag.get(LANGUAGE_KEY, FlowLanguage.Python)

    @classmethod
    def _is_eager_flow(cls, data: dict):
        """Check if the flow is an eager flow. Use field 'entry' to determine."""
        # If entry specified, it's an eager flow.
        # NOTE: returns the (truthy) entry value itself rather than a bool.
        return data.get("entry")

    @classmethod
    def load(
        cls,
        source: Union[str, PathLike],
        entry: str = None,
        **kwargs,
    ):
        """Load a flow (DAG flow or eager flow) from a source path.

        :param source: Flow directory or flow file.
        :param entry: Entry function name; only meaningful for eager flows.
        """
        from promptflow._sdk.entities._eager_flow import EagerFlow

        source_path = Path(source)
        if not source_path.exists():
            raise UserErrorException(f"Source {source_path.absolute().as_posix()} does not exist")
        flow_path = resolve_flow_path(source_path)
        if not flow_path.exists():
            raise UserErrorException(f"Flow file {flow_path.absolute().as_posix()} does not exist")
        if flow_path.suffix in [".yaml", ".yml"]:
            # read flow file to get hash
            with open(flow_path, "r", encoding=DEFAULT_ENCODING) as f:
                flow_content = f.read()
                data = load_yaml_string(flow_content)
                kwargs["content_hash"] = hash(flow_content)
            is_eager_flow = cls._is_eager_flow(data)
            if is_eager_flow:
                return EagerFlow._load(path=flow_path, entry=entry, data=data, **kwargs)
            else:
                # TODO: schema validation and warning on unknown fields
                return ProtectedFlow._load(path=flow_path, dag=data, **kwargs)
        # if non-YAML file is provided, treat is as eager flow
        return EagerFlow._load(path=flow_path, entry=entry, **kwargs)

    def _init_executable(self, tuning_node=None, variant=None):
        """Build an executable contracts-level Flow, applying variant overrides."""
        from promptflow._sdk._submitter import variant_overwrite_context

        # TODO: check if there is potential bug here
        # this is a little weird:
        # 1. the executable is created from a temp folder when there is additional includes
        # 2. after the executable is returned, the temp folder is deleted
        with variant_overwrite_context(self.code, tuning_node, variant) as flow:
            from promptflow.contracts.flow import Flow as ExecutableFlow

            return ExecutableFlow.from_yaml(flow_file=flow.path, working_dir=flow.code)

    def __eq__(self, other):
        if isinstance(other, Flow):
            return self._content_hash == other._content_hash and self.context == other.context
        return False

    def __hash__(self):
        # NOTE(review): _content_hash may be None when the flow was not loaded
        # from a YAML file; XOR with None would raise TypeError — confirm callers guard this.
        return hash(self.context) ^ self._content_hash
class ProtectedFlow(Flow, SchemaValidatableMixin):
    """This class is used to hide internal interfaces from user.

    User interface should be carefully designed to avoid breaking changes, while developers may need to change internal
    interfaces to improve the code quality. On the other hand, making all internal interfaces private will make it
    strange to use them everywhere inside this package.

    Ideally, developers should always initialize ProtectedFlow object instead of Flow object.
    """

    def __init__(
        self,
        code: str,
        params_override: Optional[Dict] = None,
        **kwargs,
    ):
        super().__init__(code=code, **kwargs)

        # Resolve the flow folder and the DAG file name once, up front.
        self._flow_dir, self._dag_file_name = self._get_flow_definition(self.code)
        # Lazily-built executable contract; populated on first inputs/outputs access.
        self._executable = None
        # Extra values merged over the on-disk DAG when dumping for validation.
        self._params_override = params_override

    @classmethod
    def _load(cls, path: Path, dag: dict, **kwargs):
        # `path` points at the DAG yaml file; the flow's code folder is its parent.
        return cls(code=path.parent.absolute().as_posix(), dag=dag, **kwargs)

    @property
    def flow_dag_path(self) -> Path:
        # Absolute path of the flow's DAG yaml file.
        return self._flow_dir / self._dag_file_name

    @property
    def name(self) -> str:
        # The flow folder name doubles as the flow name.
        return self._flow_dir.name

    @property
    def display_name(self) -> str:
        # Falls back to the folder name when the DAG declares no display_name.
        return self.dag.get("display_name", self.name)

    @property
    def tools_meta_path(self) -> Path:
        # Side effect: ensures the .promptflow directory exists before returning.
        target_path = self._flow_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
        target_path.parent.mkdir(parents=True, exist_ok=True)
        return target_path

    @classmethod
    def _get_flow_definition(cls, flow, base_path=None) -> Tuple[Path, str]:
        """Resolve *flow* (a folder or a file path) to (flow directory, DAG file name).

        :raises ValueError: if *flow* is neither a folder containing the default DAG
            file nor an existing file.
        """
        if base_path:
            flow_path = Path(base_path) / flow
        else:
            flow_path = Path(flow)

        if flow_path.is_dir() and (flow_path / DAG_FILE_NAME).is_file():
            return flow_path, DAG_FILE_NAME
        elif flow_path.is_file():
            return flow_path.parent, flow_path.name

        raise ValueError(f"Can't find flow with path {flow_path.as_posix()}.")

    # region SchemaValidatableMixin
    @classmethod
    def _create_schema_for_validation(cls, context) -> Schema:
        # import here to avoid circular import
        from ..schemas._flow import FlowSchema

        return FlowSchema(context=context)

    def _default_context(self) -> dict:
        # Schema validation resolves relative paths against the flow directory.
        return {BASE_PATH_CONTEXT_KEY: self._flow_dir}

    def _create_validation_error(self, message, no_personal_data_message=None):
        return UserErrorException(
            message=message,
            target=ErrorTarget.CONTROL_PLANE_SDK,
            no_personal_data_message=no_personal_data_message,
        )

    def _dump_for_validation(self) -> Dict:
        # Flow is read-only in control plane, so we always dump the flow from file
        data = load_yaml(self.flow_dag_path)
        if isinstance(self._params_override, dict):
            data.update(self._params_override)
        return data

    # endregion

    # region MLFlow model requirements
    @property
    def inputs(self):
        # Used to build the MLflow model signature; maps input name -> type string.
        if not self._executable:
            self._executable = self._init_executable()
        return {k: v.type.value for k, v in self._executable.inputs.items()}

    @property
    def outputs(self):
        # Used to build the MLflow model signature; maps output name -> type string.
        if not self._executable:
            self._executable = self._init_executable()
        return {k: v.type.value for k, v in self._executable.outputs.items()}

    # endregion

    def __call__(self, *args, **kwargs):
        """Calling flow as a function, the inputs should be provided with key word arguments.
        Returns the output of the flow.

        The function call throws UserErrorException: if the flow is not valid or the inputs are not valid.
        SystemErrorException: if the flow execution failed due to unexpected executor error.

        :param args: positional arguments are not supported.
        :param kwargs: flow inputs with key word arguments.
        :return: the ``output`` attribute of the line result.
        """
        if args:
            raise UserErrorException("Flow can only be called with keyword arguments.")

        result = self.invoke(inputs=kwargs)
        return result.output

    def invoke(self, inputs: dict) -> "LineResult":
        """Invoke a flow and get a LineResult object."""
        # Imported lazily to avoid circular imports at module load time.
        from promptflow._sdk._submitter.test_submitter import TestSubmitterViaProxy
        from promptflow._sdk.operations._flow_context_resolver import FlowContextResolver

        # CSharp flows are executed through a proxy submitter; Python flows go
        # through a cached invoker resolved from the flow context.
        if self.dag.get(LANGUAGE_KEY, FlowLanguage.Python) == FlowLanguage.CSharp:
            with TestSubmitterViaProxy(flow=self, flow_context=self.context).init() as submitter:
                result = submitter.exec_with_inputs(
                    inputs=inputs,
                )
                return result
        else:
            invoker = FlowContextResolver.resolve(flow=self)
            result = invoker._invoke(
                data=inputs,
            )
            return result
| promptflow/src/promptflow/promptflow/_sdk/entities/_flow.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_flow.py",
"repo_id": "promptflow",
"token_count": 5217
} | 44 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

# Declare this directory as an extensible (namespace-style) package so other
# distributions can contribute modules under the same package name.
# `__import__` is used to avoid leaving a `pkgutil` name in the module namespace.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore
| promptflow/src/promptflow/promptflow/_sdk/schemas/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/schemas/__init__.py",
"repo_id": "promptflow",
"token_count": 54
} | 45 |
from dataclasses import dataclass
from enum import Enum
from typing import Optional
class FeatureState(Enum):
    """The enum of feature state.

    READY: The feature is ready to use.
    E2ETEST: The feature is not ready to be shipped to customer and is in e2e testing.
    """

    # Feature is generally available.
    READY = "Ready"
    # Feature is still gated behind end-to-end testing.
    E2ETEST = "E2ETest"
@dataclass
class Feature:
    """The dataclass of feature."""

    name: str  # unique feature identifier
    description: str  # human-readable summary of the feature
    state: FeatureState  # rollout state; see FeatureState
    component: Optional[str] = "executor"  # owning component, defaults to the executor
def get_feature_list():
    """Return the features currently advertised by the executor component.

    All listed features are in the READY state; each entry keeps the default
    ``component`` of "executor".
    """
    ready_features = [
        ("ActivateConfig", "Bypass node execution when the node does not meet activate condition."),
        ("Image", "Support image input and output."),
        ("EnvironmentVariablesInYaml", "Support environment variables in flow.dag.yaml."),
    ]
    return [
        Feature(name=feature_name, description=feature_description, state=FeatureState.READY)
        for feature_name, feature_description in ready_features
    ]
| promptflow/src/promptflow/promptflow/_utils/feature_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/feature_utils.py",
"repo_id": "promptflow",
"token_count": 447
} | 46 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
# Folder holding packaged resource files, two levels above this module.
RESOURCE_FOLDER = Path(__file__).parent.parent / "resources"
# Yaml template used when generating a command component spec.
COMMAND_COMPONENT_SPEC_TEMPLATE = RESOURCE_FOLDER / "component_spec_template.yaml"
# Default Python version baked into generated component environments.
DEFAULT_PYTHON_VERSION = "3.9"
| promptflow/src/promptflow/promptflow/azure/_constants/_component.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_constants/_component.py",
"repo_id": "promptflow",
"token_count": 102
} | 47 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import AzureMachineLearningDesignerServiceClientConfiguration
from .operations import BulkRunsOperations, ConnectionOperations, ConnectionsOperations, FlowRunsAdminOperations, FlowRuntimesOperations, FlowRuntimesWorkspaceIndependentOperations, FlowSessionsOperations, FlowsOperations, FlowsProviderOperations, ToolsOperations
# NOTE: generated by AutoRest; hand edits will be lost when the client is regenerated.
class AzureMachineLearningDesignerServiceClient:
    """AzureMachineLearningDesignerServiceClient.

    :ivar bulk_runs: BulkRunsOperations operations
    :vartype bulk_runs: flow.aio.operations.BulkRunsOperations
    :ivar connection: ConnectionOperations operations
    :vartype connection: flow.aio.operations.ConnectionOperations
    :ivar connections: ConnectionsOperations operations
    :vartype connections: flow.aio.operations.ConnectionsOperations
    :ivar flow_runs_admin: FlowRunsAdminOperations operations
    :vartype flow_runs_admin: flow.aio.operations.FlowRunsAdminOperations
    :ivar flow_runtimes: FlowRuntimesOperations operations
    :vartype flow_runtimes: flow.aio.operations.FlowRuntimesOperations
    :ivar flow_runtimes_workspace_independent: FlowRuntimesWorkspaceIndependentOperations
     operations
    :vartype flow_runtimes_workspace_independent:
     flow.aio.operations.FlowRuntimesWorkspaceIndependentOperations
    :ivar flows: FlowsOperations operations
    :vartype flows: flow.aio.operations.FlowsOperations
    :ivar flow_sessions: FlowSessionsOperations operations
    :vartype flow_sessions: flow.aio.operations.FlowSessionsOperations
    :ivar flows_provider: FlowsProviderOperations operations
    :vartype flows_provider: flow.aio.operations.FlowsProviderOperations
    :ivar tools: ToolsOperations operations
    :vartype tools: flow.aio.operations.ToolsOperations
    :param base_url: Service URL. Default value is ''.
    :type base_url: str
    :param api_version: Api Version. The default value is "1.0.0".
    :type api_version: str
    """

    def __init__(
        self,
        base_url: str = "",
        api_version: Optional[str] = "1.0.0",
        **kwargs: Any
    ) -> None:
        # The configuration carries the api version and the shared pipeline policies.
        self._config = AzureMachineLearningDesignerServiceClientConfiguration(api_version=api_version, **kwargs)
        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every model class exported by the generated models module with msrest.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Validation is performed server-side; skip client-side schema validation.
        self._serialize.client_side_validation = False

        # One operations group per service area, all sharing the same pipeline client.
        self.bulk_runs = BulkRunsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.connection = ConnectionOperations(self._client, self._config, self._serialize, self._deserialize)
        self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.flow_runs_admin = FlowRunsAdminOperations(self._client, self._config, self._serialize, self._deserialize)
        self.flow_runtimes = FlowRuntimesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.flow_runtimes_workspace_independent = FlowRuntimesWorkspaceIndependentOperations(self._client, self._config, self._serialize, self._deserialize)
        self.flows = FlowsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.flow_sessions = FlowSessionsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.flows_provider = FlowsProviderOperations(self._client, self._config, self._serialize, self._deserialize)
        self.tools = ToolsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is not mutated when the URL is formatted.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "AzureMachineLearningDesignerServiceClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/_azure_machine_learning_designer_service_client.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/_azure_machine_learning_designer_service_client.py",
"repo_id": "promptflow",
"token_count": 1947
} | 48 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
# NOTE: generated by AutoRest; hand edits will be lost when this file is regenerated.
# Each enum subclasses `str` with CaseInsensitiveEnumMeta, so members compare
# case-insensitively against their wire-format string values.
class ActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    SEND_VALIDATION_REQUEST = "SendValidationRequest"
    GET_VALIDATION_STATUS = "GetValidationStatus"
    SUBMIT_BULK_RUN = "SubmitBulkRun"
    LOG_RUN_RESULT = "LogRunResult"
    LOG_RUN_TERMINATED_EVENT = "LogRunTerminatedEvent"

class AetherArgumentValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    LITERAL = "Literal"
    PARAMETER = "Parameter"
    INPUT = "Input"
    OUTPUT = "Output"
    NESTED_LIST = "NestedList"
    STRING_INTERPOLATION_LIST = "StringInterpolationList"

class AetherAssetType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    URI_FILE = "UriFile"
    URI_FOLDER = "UriFolder"
    ML_TABLE = "MLTable"
    CUSTOM_MODEL = "CustomModel"
    ML_FLOW_MODEL = "MLFlowModel"
    TRITON_MODEL = "TritonModel"
    OPEN_AI_MODEL = "OpenAIModel"

class AetherBuildSourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    CLOUD_BUILD = "CloudBuild"
    VSO = "Vso"
    VSO_GIT = "VsoGit"

class AetherComputeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    BATCH_AI = "BatchAi"
    MLC = "MLC"
    HDI_CLUSTER = "HdiCluster"
    REMOTE_DOCKER = "RemoteDocker"
    DATABRICKS = "Databricks"
    AISC = "Aisc"

class AetherControlFlowType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    DO_WHILE = "DoWhile"
    PARALLEL_FOR = "ParallelFor"

class AetherControlInputValue(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    FALSE = "False"
    TRUE = "True"
    SKIPPED = "Skipped"

class AetherDataCopyMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    MERGE_WITH_OVERWRITE = "MergeWithOverwrite"
    FAIL_IF_CONFLICT = "FailIfConflict"

class AetherDataLocationStorageType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    COSMOS = "Cosmos"
    AZURE_BLOB = "AzureBlob"
    ARTIFACT = "Artifact"
    SNAPSHOT = "Snapshot"
    SAVED_AML_DATASET = "SavedAmlDataset"
    ASSET = "Asset"

class AetherDataReferenceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    AZURE_BLOB = "AzureBlob"
    AZURE_DATA_LAKE = "AzureDataLake"
    AZURE_FILES = "AzureFiles"
    COSMOS = "Cosmos"
    PHILLY_HDFS = "PhillyHdfs"
    AZURE_SQL_DATABASE = "AzureSqlDatabase"
    AZURE_POSTGRES_DATABASE = "AzurePostgresDatabase"
    AZURE_DATA_LAKE_GEN2 = "AzureDataLakeGen2"
    DBFS = "DBFS"
    AZURE_MY_SQL_DATABASE = "AzureMySqlDatabase"
    CUSTOM = "Custom"
    HDFS = "Hdfs"

class AetherDatasetType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    FILE = "File"
    TABULAR = "Tabular"

class AetherDataStoreMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    MOUNT = "Mount"
    DOWNLOAD = "Download"
    UPLOAD = "Upload"
    DIRECT = "Direct"
    HDFS = "Hdfs"
    LINK = "Link"

class AetherDataTransferStorageType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    DATA_BASE = "DataBase"
    FILE_SYSTEM = "FileSystem"

class AetherDataTransferTaskType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    IMPORT_DATA = "ImportData"
    EXPORT_DATA = "ExportData"
    COPY_DATA = "CopyData"

class AetherEarlyTerminationPolicyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    BANDIT = "Bandit"
    MEDIAN_STOPPING = "MedianStopping"
    TRUNCATION_SELECTION = "TruncationSelection"

class AetherEntityStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    ACTIVE = "Active"
    DEPRECATED = "Deprecated"
    DISABLED = "Disabled"

class AetherExecutionEnvironment(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    EXE_WORKER_MACHINE = "ExeWorkerMachine"
    DOCKER_CONTAINER_WITHOUT_NETWORK = "DockerContainerWithoutNetwork"
    DOCKER_CONTAINER_WITH_NETWORK = "DockerContainerWithNetwork"
    HYPER_V_WITHOUT_NETWORK = "HyperVWithoutNetwork"
    HYPER_V_WITH_NETWORK = "HyperVWithNetwork"

class AetherExecutionPhase(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    EXECUTION = "Execution"
    INITIALIZATION = "Initialization"
    FINALIZATION = "Finalization"

class AetherFeaturizationMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AUTO = "Auto"
    CUSTOM = "Custom"
    OFF = "Off"

class AetherFileBasedPathType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    UNKNOWN = "Unknown"
    FILE = "File"
    FOLDER = "Folder"

class AetherForecastHorizonMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AUTO = "Auto"
    CUSTOM = "Custom"

class AetherIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    USER_IDENTITY = "UserIdentity"
    MANAGED = "Managed"
    AML_TOKEN = "AMLToken"

class AetherLogVerbosity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NOT_SET = "NotSet"
    DEBUG = "Debug"
    INFO = "Info"
    WARNING = "Warning"
    ERROR = "Error"
    CRITICAL = "Critical"

class AetherModuleDeploymentSource(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    CLIENT = "Client"
    AUTO_DEPLOYMENT = "AutoDeployment"
    VSTS = "Vsts"

class AetherModuleHashVersion(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    IDENTIFIER_HASH = "IdentifierHash"
    IDENTIFIER_HASH_V2 = "IdentifierHashV2"

class AetherModuleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    BATCH_INFERENCING = "BatchInferencing"

class AetherNCrossValidationMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AUTO = "Auto"
    CUSTOM = "Custom"

class AetherParameterType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    INT = "Int"
    DOUBLE = "Double"
    BOOL = "Bool"
    STRING = "String"
    UNDEFINED = "Undefined"

class AetherParameterValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    LITERAL = "Literal"
    GRAPH_PARAMETER_NAME = "GraphParameterName"
    CONCATENATE = "Concatenate"
    INPUT = "Input"
    DATA_PATH = "DataPath"
    DATA_SET_DEFINITION = "DataSetDefinition"

class AetherPrimaryMetrics(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AUC_WEIGHTED = "AUCWeighted"
    ACCURACY = "Accuracy"
    NORM_MACRO_RECALL = "NormMacroRecall"
    AVERAGE_PRECISION_SCORE_WEIGHTED = "AveragePrecisionScoreWeighted"
    PRECISION_SCORE_WEIGHTED = "PrecisionScoreWeighted"
    SPEARMAN_CORRELATION = "SpearmanCorrelation"
    NORMALIZED_ROOT_MEAN_SQUARED_ERROR = "NormalizedRootMeanSquaredError"
    R2_SCORE = "R2Score"
    NORMALIZED_MEAN_ABSOLUTE_ERROR = "NormalizedMeanAbsoluteError"
    NORMALIZED_ROOT_MEAN_SQUARED_LOG_ERROR = "NormalizedRootMeanSquaredLogError"
    MEAN_AVERAGE_PRECISION = "MeanAveragePrecision"
    IOU = "Iou"

class AetherRepositoryType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    OTHER = "Other"
    GIT = "Git"
    SOURCE_DEPOT = "SourceDepot"
    COSMOS = "Cosmos"

class AetherResourceOperator(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    EQUAL = "Equal"
    CONTAIN = "Contain"
    GREATER_OR_EQUAL = "GreaterOrEqual"

class AetherResourceValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    STRING = "String"
    DOUBLE = "Double"

class AetherSamplingAlgorithmType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    RANDOM = "Random"
    GRID = "Grid"
    BAYESIAN = "Bayesian"

class AetherSeasonalityMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AUTO = "Auto"
    CUSTOM = "Custom"

class AetherShortSeriesHandlingConfiguration(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AUTO = "Auto"
    PAD = "Pad"
    DROP = "Drop"

class AetherStackMetaLearnerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    LOGISTIC_REGRESSION = "LogisticRegression"
    LOGISTIC_REGRESSION_CV = "LogisticRegressionCV"
    LIGHT_GBM_CLASSIFIER = "LightGBMClassifier"
    ELASTIC_NET = "ElasticNet"
    ELASTIC_NET_CV = "ElasticNetCV"
    LIGHT_GBM_REGRESSOR = "LightGBMRegressor"
    LINEAR_REGRESSION = "LinearRegression"

class AetherStoredProcedureParameterType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    STRING = "String"
    INT = "Int"
    DECIMAL = "Decimal"
    GUID = "Guid"
    BOOLEAN = "Boolean"
    DATE = "Date"

class AetherTabularTrainingMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    DISTRIBUTED = "Distributed"
    NON_DISTRIBUTED = "NonDistributed"
    AUTO = "Auto"

class AetherTargetAggregationFunction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    SUM = "Sum"
    MAX = "Max"
    MIN = "Min"
    MEAN = "Mean"

class AetherTargetLagsMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AUTO = "Auto"
    CUSTOM = "Custom"

class AetherTargetRollingWindowSizeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AUTO = "Auto"
    CUSTOM = "Custom"

class AetherTaskType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    CLASSIFICATION = "Classification"
    REGRESSION = "Regression"
    FORECASTING = "Forecasting"
    IMAGE_CLASSIFICATION = "ImageClassification"
    IMAGE_CLASSIFICATION_MULTILABEL = "ImageClassificationMultilabel"
    IMAGE_OBJECT_DETECTION = "ImageObjectDetection"
    IMAGE_INSTANCE_SEGMENTATION = "ImageInstanceSegmentation"
    TEXT_CLASSIFICATION = "TextClassification"
    TEXT_MULTI_LABELING = "TextMultiLabeling"
    TEXT_NER = "TextNER"
    TEXT_CLASSIFICATION_MULTILABEL = "TextClassificationMultilabel"

class AetherTrainingOutputType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    METRICS = "Metrics"
    MODEL = "Model"

class AetherUIScriptLanguageEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    PYTHON = "Python"
    R = "R"
    JSON = "Json"
    SQL = "Sql"

class AetherUIWidgetTypeEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    DEFAULT = "Default"
    MODE = "Mode"
    COLUMN_PICKER = "ColumnPicker"
    CREDENTIAL = "Credential"
    SCRIPT = "Script"
    COMPUTE_SELECTION = "ComputeSelection"
    JSON_EDITOR = "JsonEditor"
    SEARCH_SPACE_PARAMETER = "SearchSpaceParameter"
    SECTION_TOGGLE = "SectionToggle"
    YAML_EDITOR = "YamlEditor"
    ENABLE_RUNTIME_SWEEP = "EnableRuntimeSweep"
    DATA_STORE_SELECTION = "DataStoreSelection"
    INSTANCE_TYPE_SELECTION = "InstanceTypeSelection"
    CONNECTION_SELECTION = "ConnectionSelection"
    PROMPT_FLOW_CONNECTION_SELECTION = "PromptFlowConnectionSelection"
    AZURE_OPEN_AI_DEPLOYMENT_NAME_SELECTION = "AzureOpenAIDeploymentNameSelection"

class AetherUploadState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    UPLOADING = "Uploading"
    COMPLETED = "Completed"
    CANCELED = "Canceled"
    FAILED = "Failed"

class AetherUseStl(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    SEASON = "Season"
    SEASON_TREND = "SeasonTrend"

class AEVAAssetType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    URI_FILE = "UriFile"
    URI_FOLDER = "UriFolder"
    ML_TABLE = "MLTable"
    CUSTOM_MODEL = "CustomModel"
    ML_FLOW_MODEL = "MLFlowModel"
    TRITON_MODEL = "TritonModel"
    OPEN_AI_MODEL = "OpenAIModel"

class AEVADataStoreMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    MOUNT = "Mount"
    DOWNLOAD = "Download"
    UPLOAD = "Upload"
    DIRECT = "Direct"
    HDFS = "Hdfs"
    LINK = "Link"

class AEVAIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    USER_IDENTITY = "UserIdentity"
    MANAGED = "Managed"
    AML_TOKEN = "AMLToken"

class ApplicationEndpointType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    JUPYTER = "Jupyter"
    JUPYTER_LAB = "JupyterLab"
    SSH = "SSH"
    TENSOR_BOARD = "TensorBoard"
    VS_CODE = "VSCode"
    THEIA = "Theia"
    GRAFANA = "Grafana"
    CUSTOM = "Custom"
    RAY_DASHBOARD = "RayDashboard"

class ArgumentValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    LITERAL = "Literal"
    PARAMETER = "Parameter"
    INPUT = "Input"
    OUTPUT = "Output"
    NESTED_LIST = "NestedList"
    STRING_INTERPOLATION_LIST = "StringInterpolationList"

class AssetScopeTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    WORKSPACE = "Workspace"
    # GLOBAL is a reserved word in some generators, hence the _ENUM suffix.
    GLOBAL_ENUM = "Global"
    ALL = "All"
    FEED = "Feed"

class AssetSourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    UNKNOWN = "Unknown"
    LOCAL = "Local"
    GITHUB_FILE = "GithubFile"
    GITHUB_FOLDER = "GithubFolder"
    DEVOPS_ARTIFACTS_ZIP = "DevopsArtifactsZip"

class AssetType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    COMPONENT = "Component"
    MODEL = "Model"
    ENVIRONMENT = "Environment"
    DATASET = "Dataset"
    DATA_STORE = "DataStore"
    SAMPLE_GRAPH = "SampleGraph"
    FLOW_TOOL = "FlowTool"
    FLOW_TOOL_SETTING = "FlowToolSetting"
    FLOW_CONNECTION = "FlowConnection"
    FLOW_SAMPLE = "FlowSample"
    FLOW_RUNTIME_SPEC = "FlowRuntimeSpec"

class AutoDeleteCondition(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    CREATED_GREATER_THAN = "CreatedGreaterThan"
    LAST_ACCESSED_GREATER_THAN = "LastAccessedGreaterThan"

class BuildContextLocationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    GIT = "Git"
    STORAGE_ACCOUNT = "StorageAccount"

class Communicator(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    PARAMETER_SERVER = "ParameterServer"
    GLOO = "Gloo"
    MPI = "Mpi"
    NCCL = "Nccl"
    PARALLEL_TASK = "ParallelTask"

class ComponentRegistrationTypeEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NORMAL = "Normal"
    ANONYMOUS_AML_MODULE = "AnonymousAmlModule"
    ANONYMOUS_AML_MODULE_VERSION = "AnonymousAmlModuleVersion"
    MODULE_ENTITY_ONLY = "ModuleEntityOnly"

class ComponentType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    UNKNOWN = "Unknown"
    COMMAND_COMPONENT = "CommandComponent"
    COMMAND = "Command"

class ComputeEnvironmentType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    ACI = "ACI"
    AKS = "AKS"
    AMLCOMPUTE = "AMLCOMPUTE"
    IOT = "IOT"
    AKSENDPOINT = "AKSENDPOINT"
    MIRSINGLEMODEL = "MIRSINGLEMODEL"
    MIRAMLCOMPUTE = "MIRAMLCOMPUTE"
    MIRGA = "MIRGA"
    AMLARC = "AMLARC"
    BATCHAMLCOMPUTE = "BATCHAMLCOMPUTE"
    UNKNOWN = "UNKNOWN"

class ComputeTargetType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    LOCAL = "Local"
    REMOTE = "Remote"
    HDI_CLUSTER = "HdiCluster"
    CONTAINER_INSTANCE = "ContainerInstance"
    AML_COMPUTE = "AmlCompute"
    COMPUTE_INSTANCE = "ComputeInstance"
    CMK8_S = "Cmk8s"
    SYNAPSE_SPARK = "SynapseSpark"
    KUBERNETES = "Kubernetes"
    AISC = "Aisc"
    GLOBAL_JOB_DISPATCHER = "GlobalJobDispatcher"
    DATABRICKS = "Databricks"
    MOCKED_COMPUTE = "MockedCompute"

class ComputeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    BATCH_AI = "BatchAi"
    MLC = "MLC"
    HDI_CLUSTER = "HdiCluster"
    REMOTE_DOCKER = "RemoteDocker"
    DATABRICKS = "Databricks"
    AISC = "Aisc"

class ConfigValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    STRING = "String"
    SECRET = "Secret"

class ConnectionCategory(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    PYTHON_FEED = "PythonFeed"
    ACR = "ACR"
    GIT = "Git"
    S3 = "S3"
    SNOWFLAKE = "Snowflake"
    AZURE_SQL_DB = "AzureSqlDb"
    AZURE_SYNAPSE_ANALYTICS = "AzureSynapseAnalytics"
    AZURE_MY_SQL_DB = "AzureMySqlDb"
    AZURE_POSTGRES_DB = "AzurePostgresDb"
    AZURE_DATA_LAKE_GEN2 = "AzureDataLakeGen2"
    REDIS = "Redis"
    API_KEY = "ApiKey"
    AZURE_OPEN_AI = "AzureOpenAI"
    COGNITIVE_SEARCH = "CognitiveSearch"
    COGNITIVE_SERVICE = "CognitiveService"
    CUSTOM_KEYS = "CustomKeys"
    AZURE_BLOB = "AzureBlob"
    AZURE_ONE_LAKE = "AzureOneLake"
    COSMOS_DB = "CosmosDb"
    COSMOS_DB_MONGO_DB_API = "CosmosDbMongoDbApi"
    AZURE_DATA_EXPLORER = "AzureDataExplorer"
    AZURE_MARIA_DB = "AzureMariaDb"
    AZURE_DATABRICKS_DELTA_LAKE = "AzureDatabricksDeltaLake"
    AZURE_SQL_MI = "AzureSqlMi"
    AZURE_TABLE_STORAGE = "AzureTableStorage"
    AMAZON_RDS_FOR_ORACLE = "AmazonRdsForOracle"
    AMAZON_RDS_FOR_SQL_SERVER = "AmazonRdsForSqlServer"
    AMAZON_REDSHIFT = "AmazonRedshift"
    DB2 = "Db2"
    DRILL = "Drill"
    GOOGLE_BIG_QUERY = "GoogleBigQuery"
    GREENPLUM = "Greenplum"
    HBASE = "Hbase"
    HIVE = "Hive"
    IMPALA = "Impala"
    INFORMIX = "Informix"
    MARIA_DB = "MariaDb"
    MICROSOFT_ACCESS = "MicrosoftAccess"
    MY_SQL = "MySql"
    NETEZZA = "Netezza"
    ORACLE = "Oracle"
    PHOENIX = "Phoenix"
    POSTGRE_SQL = "PostgreSql"
    PRESTO = "Presto"
    SAP_OPEN_HUB = "SapOpenHub"
    SAP_BW = "SapBw"
    SAP_HANA = "SapHana"
    SAP_TABLE = "SapTable"
    SPARK = "Spark"
    SQL_SERVER = "SqlServer"
    SYBASE = "Sybase"
    TERADATA = "Teradata"
    VERTICA = "Vertica"
    CASSANDRA = "Cassandra"
    COUCHBASE = "Couchbase"
    MONGO_DB_V2 = "MongoDbV2"
    MONGO_DB_ATLAS = "MongoDbAtlas"
    AMAZON_S3_COMPATIBLE = "AmazonS3Compatible"
    FILE_SERVER = "FileServer"
    FTP_SERVER = "FtpServer"
    GOOGLE_CLOUD_STORAGE = "GoogleCloudStorage"
    HDFS = "Hdfs"
    ORACLE_CLOUD_STORAGE = "OracleCloudStorage"
    SFTP = "Sftp"
    GENERIC_HTTP = "GenericHttp"
    O_DATA_REST = "ODataRest"
    ODBC = "Odbc"
    GENERIC_REST = "GenericRest"
    AMAZON_MWS = "AmazonMws"
    CONCUR = "Concur"
    DYNAMICS = "Dynamics"
    DYNAMICS_AX = "DynamicsAx"
    DYNAMICS_CRM = "DynamicsCrm"
    GOOGLE_AD_WORDS = "GoogleAdWords"
    HUBSPOT = "Hubspot"
    JIRA = "Jira"
    MAGENTO = "Magento"
    MARKETO = "Marketo"
    OFFICE365 = "Office365"
    ELOQUA = "Eloqua"
    RESPONSYS = "Responsys"
    ORACLE_SERVICE_CLOUD = "OracleServiceCloud"
    PAY_PAL = "PayPal"
    QUICK_BOOKS = "QuickBooks"
    SALESFORCE = "Salesforce"
    SALESFORCE_SERVICE_CLOUD = "SalesforceServiceCloud"
    SALESFORCE_MARKETING_CLOUD = "SalesforceMarketingCloud"
    SAP_CLOUD_FOR_CUSTOMER = "SapCloudForCustomer"
    SAP_ECC = "SapEcc"
    SERVICE_NOW = "ServiceNow"
    SHARE_POINT_ONLINE_LIST = "SharePointOnlineList"
    SHOPIFY = "Shopify"
    SQUARE = "Square"
    WEB_TABLE = "WebTable"
    XERO = "Xero"
    ZOHO = "Zoho"
    GENERIC_CONTAINER_REGISTRY = "GenericContainerRegistry"

class ConnectionScope(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    USER = "User"
    WORKSPACE_SHARED = "WorkspaceShared"

class ConnectionSourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NODE = "Node"
    NODE_INPUT = "NodeInput"

class ConnectionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    OPEN_AI = "OpenAI"
    AZURE_OPEN_AI = "AzureOpenAI"
    SERP = "Serp"
    BING = "Bing"
    AZURE_CONTENT_MODERATOR = "AzureContentModerator"
    CUSTOM = "Custom"
    AZURE_CONTENT_SAFETY = "AzureContentSafety"
    COGNITIVE_SEARCH = "CognitiveSearch"
    SUBSTRATE_LLM = "SubstrateLLM"
    PINECONE = "Pinecone"
    QDRANT = "Qdrant"
    WEAVIATE = "Weaviate"
    FORM_RECOGNIZER = "FormRecognizer"

class ConsumeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    REFERENCE = "Reference"
    COPY = "Copy"
    COPY_AND_AUTO_UPGRADE = "CopyAndAutoUpgrade"

class ControlFlowType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    DO_WHILE = "DoWhile"
    PARALLEL_FOR = "ParallelFor"

class ControlInputValue(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    FALSE = "False"
    TRUE = "True"
    SKIPPED = "Skipped"

class DataBindingMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    MOUNT = "Mount"
    DOWNLOAD = "Download"
    UPLOAD = "Upload"
    READ_ONLY_MOUNT = "ReadOnlyMount"
    READ_WRITE_MOUNT = "ReadWriteMount"
    DIRECT = "Direct"
    EVAL_MOUNT = "EvalMount"
    EVAL_DOWNLOAD = "EvalDownload"

class DataCategory(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    ALL = "All"
    DATASET = "Dataset"
    MODEL = "Model"

class DataCopyMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    MERGE_WITH_OVERWRITE = "MergeWithOverwrite"
    FAIL_IF_CONFLICT = "FailIfConflict"

class DataLocationStorageType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    AZURE_BLOB = "AzureBlob"
    ARTIFACT = "Artifact"
    SNAPSHOT = "Snapshot"
    SAVED_AML_DATASET = "SavedAmlDataset"
    ASSET = "Asset"

class DataPortType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    INPUT = "Input"
    OUTPUT = "Output"

class DataReferenceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    AZURE_BLOB = "AzureBlob"
    AZURE_DATA_LAKE = "AzureDataLake"
    AZURE_FILES = "AzureFiles"
    AZURE_SQL_DATABASE = "AzureSqlDatabase"
    AZURE_POSTGRES_DATABASE = "AzurePostgresDatabase"
    AZURE_DATA_LAKE_GEN2 = "AzureDataLakeGen2"
    DBFS = "DBFS"
    AZURE_MY_SQL_DATABASE = "AzureMySqlDatabase"
    CUSTOM = "Custom"
    HDFS = "Hdfs"

class DatasetAccessModes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    DEFAULT = "Default"
    DATASET_IN_DPV2 = "DatasetInDpv2"
    ASSET_IN_DPV2 = "AssetInDpv2"
    DATASET_IN_DESIGNER_UI = "DatasetInDesignerUI"
    DATASET_IN_DPV2_WITH_DATASET_IN_DESIGNER_UI = "DatasetInDpv2WithDatasetInDesignerUI"
    DATASET = "Dataset"
    ASSET_IN_DPV2_WITH_DATASET_IN_DESIGNER_UI = "AssetInDpv2WithDatasetInDesignerUI"
    DATASET_AND_ASSET_IN_DPV2_WITH_DATASET_IN_DESIGNER_UI = "DatasetAndAssetInDpv2WithDatasetInDesignerUI"
    ASSET_IN_DESIGNER_UI = "AssetInDesignerUI"
    ASSET_IN_DPV2_WITH_ASSET_IN_DESIGNER_UI = "AssetInDpv2WithAssetInDesignerUI"
    ASSET = "Asset"

class DatasetConsumptionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    RUN_INPUT = "RunInput"
    REFERENCE = "Reference"

class DatasetDeliveryMechanism(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    DIRECT = "Direct"
    MOUNT = "Mount"
    DOWNLOAD = "Download"
    HDFS = "Hdfs"

class DatasetOutputType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    RUN_OUTPUT = "RunOutput"
    REFERENCE = "Reference"

class DatasetType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    FILE = "File"
    TABULAR = "Tabular"

class DataSourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    NONE = "None"
    PIPELINE_DATA_SOURCE = "PipelineDataSource"
    AML_DATASET = "AmlDataset"
    GLOBAL_DATASET = "GlobalDataset"
    FEED_MODEL = "FeedModel"
    FEED_DATASET = "FeedDataset"
    AML_DATA_VERSION = "AmlDataVersion"
    AML_MODEL_VERSION = "AMLModelVersion"

class DataStoreMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    MOUNT = "Mount"
    DOWNLOAD = "Download"
    UPLOAD = "Upload"

class DataTransferStorageType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    DATA_BASE = "DataBase"
    FILE_SYSTEM = "FileSystem"

class DataTransferTaskType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    IMPORT_DATA = "ImportData"
    EXPORT_DATA = "ExportData"
    COPY_DATA = "CopyData"

class DataTypeMechanism(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    ERROR_WHEN_NOT_EXISTING = "ErrorWhenNotExisting"
    REGISTER_WHEN_NOT_EXISTING = "RegisterWhenNotExisting"
    REGISTER_BUILDIN_DATA_TYPE_ONLY = "RegisterBuildinDataTypeOnly"

class DeliveryMechanism(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    DIRECT = "Direct"
    MOUNT = "Mount"
    DOWNLOAD = "Download"
    HDFS = "Hdfs"

class DistributionParameterEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    TEXT = "Text"
    NUMBER = "Number"

class DistributionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    PY_TORCH = "PyTorch"
    TENSOR_FLOW = "TensorFlow"
    MPI = "Mpi"
    RAY = "Ray"

class EarlyTerminationPolicyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    BANDIT = "Bandit"
    MEDIAN_STOPPING = "MedianStopping"
    TRUNCATION_SELECTION = "TruncationSelection"

class EmailNotificationEnableType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    JOB_COMPLETED = "JobCompleted"
    JOB_FAILED = "JobFailed"
    JOB_CANCELLED = "JobCancelled"

class EndpointAuthMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):

    AML_TOKEN = "AMLToken"
    KEY = "Key"
    AAD_TOKEN = "AADToken"
class EntityKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
INVALID = "Invalid"
LINEAGE_ROOT = "LineageRoot"
VERSIONED = "Versioned"
UNVERSIONED = "Unversioned"
class EntityStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ACTIVE = "Active"
DEPRECATED = "Deprecated"
DISABLED = "Disabled"
class ErrorHandlingMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DEFAULT_INTERPOLATION = "DefaultInterpolation"
CUSTOMER_FACING_INTERPOLATION = "CustomerFacingInterpolation"
class ExecutionPhase(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
EXECUTION = "Execution"
INITIALIZATION = "Initialization"
FINALIZATION = "Finalization"
class FeaturizationMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTO = "Auto"
CUSTOM = "Custom"
OFF = "Off"
class FlowFeatureStateEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
READY = "Ready"
E2_E_TEST = "E2ETest"
class FlowLanguage(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Programming language of a flow; values match case-insensitively via the metaclass."""

    PYTHON = "Python"
    C_SHARP = "CSharp"
class FlowPatchOperationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ARCHIVE_FLOW = "ArchiveFlow"
RESTORE_FLOW = "RestoreFlow"
EXPORT_FLOW_TO_FILE = "ExportFlowToFile"
class FlowRunMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
FLOW = "Flow"
SINGLE_NODE = "SingleNode"
FROM_NODE = "FromNode"
BULK_TEST = "BulkTest"
EVAL = "Eval"
PAIRWISE_EVAL = "PairwiseEval"
class FlowRuntimeSubmissionApiVersion(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
VERSION1 = "Version1"
VERSION2 = "Version2"
class FlowRunTypeEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Kind of flow run as reported by the service (full flow, evaluation, or partial-graph runs)."""

    FLOW_RUN = "FlowRun"
    EVALUATION_RUN = "EvaluationRun"
    PAIRWISE_EVALUATION_RUN = "PairwiseEvaluationRun"
    SINGLE_NODE_RUN = "SingleNodeRun"
    FROM_NODE_RUN = "FromNodeRun"
class FlowTestMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Whether a flow test is executed synchronously or asynchronously.

    Note: the Async member is named ASYNC_ENUM because ``async`` is a Python keyword.
    """

    SYNC = "Sync"
    ASYNC_ENUM = "Async"
class FlowType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Category of a flow (standard, evaluation, chat, or RAG); case-insensitive string enum."""

    DEFAULT = "Default"
    EVALUATION = "Evaluation"
    CHAT = "Chat"
    RAG = "Rag"
class ForecastHorizonMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTO = "Auto"
CUSTOM = "Custom"
class Framework(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
PYTHON = "Python"
PY_SPARK = "PySpark"
CNTK = "Cntk"
TENSOR_FLOW = "TensorFlow"
PY_TORCH = "PyTorch"
PY_SPARK_INTERACTIVE = "PySparkInteractive"
R = "R"
class Frequency(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
MONTH = "Month"
WEEK = "Week"
DAY = "Day"
HOUR = "Hour"
MINUTE = "Minute"
class GlobalJobDispatcherSupportedComputeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AML_COMPUTE = "AmlCompute"
AML_K8_S = "AmlK8s"
class GraphComponentsMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NORMAL = "Normal"
ALL_DESIGNER_BUILDIN = "AllDesignerBuildin"
CONTAINS_DESIGNER_BUILDIN = "ContainsDesignerBuildin"
class GraphDatasetsLoadModes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
SKIP_DATASETS_LOAD = "SkipDatasetsLoad"
V1_REGISTERED_DATASET = "V1RegisteredDataset"
V1_SAVED_DATASET = "V1SavedDataset"
PERSIST_DATASETS_INFO = "PersistDatasetsInfo"
SUBMISSION_NEEDED_UPSTREAM_DATASET_ONLY = "SubmissionNeededUpstreamDatasetOnly"
SUBMISSION_NEEDED_IN_COMPLETE_DATASET_ONLY = "SubmissionNeededInCompleteDatasetOnly"
V2_ASSET = "V2Asset"
SUBMISSION = "Submission"
ALL_REGISTERED_DATA = "AllRegisteredData"
ALL_DATA = "AllData"
class GraphSdkCodeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
PYTHON = "Python"
JUPYTER_NOTEBOOK = "JupyterNotebook"
UNKNOWN = "Unknown"
class HttpStatusCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CONTINUE_ENUM = "Continue"
SWITCHING_PROTOCOLS = "SwitchingProtocols"
PROCESSING = "Processing"
EARLY_HINTS = "EarlyHints"
OK = "OK"
CREATED = "Created"
ACCEPTED = "Accepted"
NON_AUTHORITATIVE_INFORMATION = "NonAuthoritativeInformation"
NO_CONTENT = "NoContent"
RESET_CONTENT = "ResetContent"
PARTIAL_CONTENT = "PartialContent"
MULTI_STATUS = "MultiStatus"
ALREADY_REPORTED = "AlreadyReported"
IM_USED = "IMUsed"
MULTIPLE_CHOICES = "MultipleChoices"
AMBIGUOUS = "Ambiguous"
MOVED_PERMANENTLY = "MovedPermanently"
MOVED = "Moved"
FOUND = "Found"
REDIRECT = "Redirect"
SEE_OTHER = "SeeOther"
REDIRECT_METHOD = "RedirectMethod"
NOT_MODIFIED = "NotModified"
USE_PROXY = "UseProxy"
UNUSED = "Unused"
TEMPORARY_REDIRECT = "TemporaryRedirect"
REDIRECT_KEEP_VERB = "RedirectKeepVerb"
PERMANENT_REDIRECT = "PermanentRedirect"
BAD_REQUEST = "BadRequest"
UNAUTHORIZED = "Unauthorized"
PAYMENT_REQUIRED = "PaymentRequired"
FORBIDDEN = "Forbidden"
NOT_FOUND = "NotFound"
METHOD_NOT_ALLOWED = "MethodNotAllowed"
NOT_ACCEPTABLE = "NotAcceptable"
PROXY_AUTHENTICATION_REQUIRED = "ProxyAuthenticationRequired"
REQUEST_TIMEOUT = "RequestTimeout"
CONFLICT = "Conflict"
GONE = "Gone"
LENGTH_REQUIRED = "LengthRequired"
PRECONDITION_FAILED = "PreconditionFailed"
REQUEST_ENTITY_TOO_LARGE = "RequestEntityTooLarge"
REQUEST_URI_TOO_LONG = "RequestUriTooLong"
UNSUPPORTED_MEDIA_TYPE = "UnsupportedMediaType"
REQUESTED_RANGE_NOT_SATISFIABLE = "RequestedRangeNotSatisfiable"
EXPECTATION_FAILED = "ExpectationFailed"
MISDIRECTED_REQUEST = "MisdirectedRequest"
UNPROCESSABLE_ENTITY = "UnprocessableEntity"
LOCKED = "Locked"
FAILED_DEPENDENCY = "FailedDependency"
UPGRADE_REQUIRED = "UpgradeRequired"
PRECONDITION_REQUIRED = "PreconditionRequired"
TOO_MANY_REQUESTS = "TooManyRequests"
REQUEST_HEADER_FIELDS_TOO_LARGE = "RequestHeaderFieldsTooLarge"
UNAVAILABLE_FOR_LEGAL_REASONS = "UnavailableForLegalReasons"
INTERNAL_SERVER_ERROR = "InternalServerError"
NOT_IMPLEMENTED = "NotImplemented"
BAD_GATEWAY = "BadGateway"
SERVICE_UNAVAILABLE = "ServiceUnavailable"
GATEWAY_TIMEOUT = "GatewayTimeout"
HTTP_VERSION_NOT_SUPPORTED = "HttpVersionNotSupported"
VARIANT_ALSO_NEGOTIATES = "VariantAlsoNegotiates"
INSUFFICIENT_STORAGE = "InsufficientStorage"
LOOP_DETECTED = "LoopDetected"
NOT_EXTENDED = "NotExtended"
NETWORK_AUTHENTICATION_REQUIRED = "NetworkAuthenticationRequired"
class IdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
MANAGED = "Managed"
SERVICE_PRINCIPAL = "ServicePrincipal"
AML_TOKEN = "AMLToken"
class InputType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DEFAULT = "default"
UIONLY_HIDDEN = "uionly_hidden"
class IntellectualPropertyAccessMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
READ_ONLY = "ReadOnly"
READ_WRITE = "ReadWrite"
class JobInputType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DATASET = "Dataset"
URI = "Uri"
LITERAL = "Literal"
URI_FILE = "UriFile"
URI_FOLDER = "UriFolder"
ML_TABLE = "MLTable"
CUSTOM_MODEL = "CustomModel"
ML_FLOW_MODEL = "MLFlowModel"
TRITON_MODEL = "TritonModel"
class JobLimitsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
COMMAND = "Command"
SWEEP = "Sweep"
class JobOutputType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
URI = "Uri"
DATASET = "Dataset"
URI_FILE = "UriFile"
URI_FOLDER = "UriFolder"
ML_TABLE = "MLTable"
CUSTOM_MODEL = "CustomModel"
ML_FLOW_MODEL = "MLFlowModel"
TRITON_MODEL = "TritonModel"
class JobProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
IN_PROGRESS = "InProgress"
class JobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Lifecycle status of a service-side job, from NotStarted through terminal states."""

    NOT_STARTED = "NotStarted"
    STARTING = "Starting"
    PROVISIONING = "Provisioning"
    PREPARING = "Preparing"
    QUEUED = "Queued"
    RUNNING = "Running"
    FINALIZING = "Finalizing"
    CANCEL_REQUESTED = "CancelRequested"
    # Terminal states.
    COMPLETED = "Completed"
    FAILED = "Failed"
    CANCELED = "Canceled"
    NOT_RESPONDING = "NotResponding"
    PAUSED = "Paused"
    UNKNOWN = "Unknown"
    SCHEDULED = "Scheduled"
class JobType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
COMMAND = "Command"
SWEEP = "Sweep"
LABELING = "Labeling"
PIPELINE = "Pipeline"
DATA = "Data"
AUTO_ML = "AutoML"
SPARK = "Spark"
BASE = "Base"
class KeyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
PRIMARY = "Primary"
SECONDARY = "Secondary"
class ListViewType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ACTIVE_ONLY = "ActiveOnly"
ARCHIVED_ONLY = "ArchivedOnly"
ALL = "All"
class LogLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
TRACE = "Trace"
DEBUG = "Debug"
INFORMATION = "Information"
WARNING = "Warning"
ERROR = "Error"
CRITICAL = "Critical"
NONE = "None"
class LogVerbosity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NOT_SET = "NotSet"
DEBUG = "Debug"
INFO = "Info"
WARNING = "Warning"
ERROR = "Error"
CRITICAL = "Critical"
class LongRunningUpdateType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ENABLE_MODULE = "EnableModule"
DISABLE_MODULE = "DisableModule"
UPDATE_DISPLAY_NAME = "UpdateDisplayName"
UPDATE_DESCRIPTION = "UpdateDescription"
UPDATE_TAGS = "UpdateTags"
class ManagedServiceIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssignedUserAssigned"
NONE = "None"
class MetricValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
INT = "Int"
DOUBLE = "Double"
STRING = "String"
BOOL = "Bool"
ARTIFACT = "Artifact"
HISTOGRAM = "Histogram"
MALFORMED = "Malformed"
class MfeInternalIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
MANAGED = "Managed"
AML_TOKEN = "AMLToken"
USER_IDENTITY = "UserIdentity"
class MfeInternalMLFlowAutologgerState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ENABLED = "Enabled"
DISABLED = "Disabled"
class MfeInternalScheduleStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ENABLED = "Enabled"
DISABLED = "Disabled"
class MLFlowAutologgerState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ENABLED = "Enabled"
DISABLED = "Disabled"
class ModuleDtoFields(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DEFINITION = "Definition"
YAML_STR = "YamlStr"
REGISTRATION_CONTEXT = "RegistrationContext"
RUN_SETTING_PARAMETERS = "RunSettingParameters"
RUN_DEFINITION = "RunDefinition"
ALL = "All"
DEFAULT = "Default"
BASIC = "Basic"
MINIMAL = "Minimal"
class ModuleInfoFromYamlStatusEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NEW_MODULE = "NewModule"
NEW_VERSION = "NewVersion"
CONFLICT = "Conflict"
PARSE_ERROR = "ParseError"
PROCESS_REQUEST_ERROR = "ProcessRequestError"
class ModuleRunSettingTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ALL = "All"
RELEASED = "Released"
DEFAULT = "Default"
TESTING = "Testing"
LEGACY = "Legacy"
PREVIEW = "Preview"
UX_FULL = "UxFull"
INTEGRATION = "Integration"
UX_INTEGRATION = "UxIntegration"
FULL = "Full"
class ModuleScope(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ALL = "All"
GLOBAL_ENUM = "Global"
WORKSPACE = "Workspace"
ANONYMOUS = "Anonymous"
STEP = "Step"
DRAFT = "Draft"
FEED = "Feed"
REGISTRY = "Registry"
SYSTEM_AUTO_CREATED = "SystemAutoCreated"
class ModuleSourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "Unknown"
LOCAL = "Local"
GITHUB_FILE = "GithubFile"
GITHUB_FOLDER = "GithubFolder"
DEVOPS_ARTIFACTS_ZIP = "DevopsArtifactsZip"
SERIALIZED_MODULE_INFO = "SerializedModuleInfo"
class ModuleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NONE = "None"
BATCH_INFERENCING = "BatchInferencing"
class ModuleUpdateOperationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
SET_DEFAULT_VERSION = "SetDefaultVersion"
ENABLE_MODULE = "EnableModule"
DISABLE_MODULE = "DisableModule"
UPDATE_DISPLAY_NAME = "UpdateDisplayName"
UPDATE_DESCRIPTION = "UpdateDescription"
UPDATE_TAGS = "UpdateTags"
class ModuleWorkingMechanism(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NORMAL = "Normal"
OUTPUT_TO_DATASET = "OutputToDataset"
class NCrossValidationMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTO = "Auto"
CUSTOM = "Custom"
class NodeCompositionMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NONE = "None"
ONLY_SEQUENTIAL = "OnlySequential"
FULL = "Full"
class NodesValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ALL = "All"
CUSTOM = "Custom"
class Orientation(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
HORIZONTAL = "Horizontal"
VERTICAL = "Vertical"
class OutputMechanism(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
UPLOAD = "Upload"
MOUNT = "Mount"
HDFS = "Hdfs"
LINK = "Link"
DIRECT = "Direct"
class ParameterType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
INT = "Int"
DOUBLE = "Double"
BOOL = "Bool"
STRING = "String"
UNDEFINED = "Undefined"
class ParameterValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
LITERAL = "Literal"
GRAPH_PARAMETER_NAME = "GraphParameterName"
CONCATENATE = "Concatenate"
INPUT = "Input"
DATA_PATH = "DataPath"
DATA_SET_DEFINITION = "DataSetDefinition"
class PipelineDraftMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NONE = "None"
NORMAL = "Normal"
CUSTOM = "Custom"
class PipelineRunStatusCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NOT_STARTED = "NotStarted"
RUNNING = "Running"
FAILED = "Failed"
FINISHED = "Finished"
CANCELED = "Canceled"
QUEUED = "Queued"
CANCEL_REQUESTED = "CancelRequested"
class PipelineStatusCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NOT_STARTED = "NotStarted"
IN_DRAFT = "InDraft"
PREPARING = "Preparing"
RUNNING = "Running"
FAILED = "Failed"
FINISHED = "Finished"
CANCELED = "Canceled"
THROTTLED = "Throttled"
UNKNOWN = "Unknown"
class PipelineType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
TRAINING_PIPELINE = "TrainingPipeline"
REAL_TIME_INFERENCE_PIPELINE = "RealTimeInferencePipeline"
BATCH_INFERENCE_PIPELINE = "BatchInferencePipeline"
UNKNOWN = "Unknown"
class PortAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
PROMOTE = "Promote"
VIEW_IN_DATA_STORE = "ViewInDataStore"
VISUALIZE = "Visualize"
GET_SCHEMA = "GetSchema"
CREATE_INFERENCE_GRAPH = "CreateInferenceGraph"
REGISTER_MODEL = "RegisterModel"
PROMOTE_AS_TABULAR = "PromoteAsTabular"
class PrimaryMetrics(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUC_WEIGHTED = "AUCWeighted"
ACCURACY = "Accuracy"
NORM_MACRO_RECALL = "NormMacroRecall"
AVERAGE_PRECISION_SCORE_WEIGHTED = "AveragePrecisionScoreWeighted"
PRECISION_SCORE_WEIGHTED = "PrecisionScoreWeighted"
SPEARMAN_CORRELATION = "SpearmanCorrelation"
NORMALIZED_ROOT_MEAN_SQUARED_ERROR = "NormalizedRootMeanSquaredError"
R2_SCORE = "R2Score"
NORMALIZED_MEAN_ABSOLUTE_ERROR = "NormalizedMeanAbsoluteError"
NORMALIZED_ROOT_MEAN_SQUARED_LOG_ERROR = "NormalizedRootMeanSquaredLogError"
MEAN_AVERAGE_PRECISION = "MeanAveragePrecision"
IOU = "Iou"
class ProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "Unknown"
UPDATING = "Updating"
CREATING = "Creating"
DELETING = "Deleting"
ACCEPTED = "Accepted"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
class RealTimeEndpointInternalStepCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ABOUT_TO_DEPLOY = "AboutToDeploy"
WAIT_AKS_COMPUTE_READY = "WaitAksComputeReady"
REGISTER_MODELS = "RegisterModels"
CREATE_SERVICE_FROM_MODELS = "CreateServiceFromModels"
UPDATE_SERVICE_FROM_MODELS = "UpdateServiceFromModels"
WAIT_SERVICE_CREATING = "WaitServiceCreating"
FETCH_SERVICE_RELATED_INFO = "FetchServiceRelatedInfo"
TEST_WITH_SAMPLE_DATA = "TestWithSampleData"
ABOUT_TO_DELETE = "AboutToDelete"
DELETE_DEPLOYMENT = "DeleteDeployment"
DELETE_ASSET = "DeleteAsset"
DELETE_IMAGE = "DeleteImage"
DELETE_MODEL = "DeleteModel"
DELETE_SERVICE_RECORD = "DeleteServiceRecord"
class RealTimeEndpointOpCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
class RealTimeEndpointOpStatusCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ONGOING = "Ongoing"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
SUCCEEDED_WITH_WARNING = "SucceededWithWarning"
class RecurrenceFrequency(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
MINUTE = "Minute"
HOUR = "Hour"
DAY = "Day"
WEEK = "Week"
MONTH = "Month"
class RunDisplayNameGenerationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTO_APPEND = "AutoAppend"
USER_PROVIDED_MACRO = "UserProvidedMacro"
class RunSettingParameterType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
UNDEFINED = "Undefined"
INT = "Int"
DOUBLE = "Double"
BOOL = "Bool"
STRING = "String"
JSON_STRING = "JsonString"
YAML_STRING = "YamlString"
STRING_LIST = "StringList"
class RunSettingUIWidgetTypeEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DEFAULT = "Default"
COMPUTE_SELECTION = "ComputeSelection"
JSON_EDITOR = "JsonEditor"
MODE = "Mode"
SEARCH_SPACE_PARAMETER = "SearchSpaceParameter"
SECTION_TOGGLE = "SectionToggle"
YAML_EDITOR = "YamlEditor"
ENABLE_RUNTIME_SWEEP = "EnableRuntimeSweep"
DATA_STORE_SELECTION = "DataStoreSelection"
CHECKBOX = "Checkbox"
MULTIPLE_SELECTION = "MultipleSelection"
HYPERPARAMETER_CONFIGURATION = "HyperparameterConfiguration"
JSON_TEXT_BOX = "JsonTextBox"
CONNECTION = "Connection"
STATIC = "Static"
class RunStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Lifecycle status of a run; matches values case-insensitively via the metaclass."""

    NOT_STARTED = "NotStarted"
    UNAPPROVED = "Unapproved"
    PAUSING = "Pausing"
    PAUSED = "Paused"
    STARTING = "Starting"
    PREPARING = "Preparing"
    QUEUED = "Queued"
    RUNNING = "Running"
    FINALIZING = "Finalizing"
    CANCEL_REQUESTED = "CancelRequested"
    # Terminal states.
    COMPLETED = "Completed"
    FAILED = "Failed"
    CANCELED = "Canceled"
class RuntimeStatusEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Reported availability state of a flow runtime."""

    UNAVAILABLE = "Unavailable"
    FAILED = "Failed"
    NOT_EXIST = "NotExist"
    STARTING = "Starting"
    STOPPING = "Stopping"
class RuntimeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Hosting environment backing a flow runtime."""

    MANAGED_ONLINE_ENDPOINT = "ManagedOnlineEndpoint"
    COMPUTE_INSTANCE = "ComputeInstance"
    TRAINING_SESSION = "TrainingSession"
class RunType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
HTTP = "HTTP"
SDK = "SDK"
SCHEDULE = "Schedule"
PORTAL = "Portal"
class SamplingAlgorithmType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
RANDOM = "Random"
GRID = "Grid"
BAYESIAN = "Bayesian"
class ScheduleProvisioningStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
class ScheduleStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ENABLED = "Enabled"
DISABLED = "Disabled"
class ScheduleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CRON = "Cron"
RECURRENCE = "Recurrence"
class ScopeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
GLOBAL_ENUM = "Global"
TENANT = "Tenant"
SUBSCRIPTION = "Subscription"
RESOURCE_GROUP = "ResourceGroup"
WORKSPACE = "Workspace"
class ScriptType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
PYTHON = "Python"
NOTEBOOK = "Notebook"
class SeasonalityMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTO = "Auto"
CUSTOM = "Custom"
class Section(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
GALLERY = "Gallery"
TEMPLATE = "Template"
class SessionSetupModeEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CLIENT_WAIT = "ClientWait"
SYSTEM_WAIT = "SystemWait"
class SetupFlowSessionAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Action to perform on a flow session (install, reset, update, or delete)."""

    INSTALL = "Install"
    RESET = "Reset"
    UPDATE = "Update"
    DELETE = "Delete"
class SeverityLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CRITICAL = "Critical"
ERROR = "Error"
WARNING = "Warning"
INFO = "Info"
class ShortSeriesHandlingConfiguration(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTO = "Auto"
PAD = "Pad"
DROP = "Drop"
class StackMetaLearnerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NONE = "None"
LOGISTIC_REGRESSION = "LogisticRegression"
LOGISTIC_REGRESSION_CV = "LogisticRegressionCV"
LIGHT_GBM_CLASSIFIER = "LightGBMClassifier"
ELASTIC_NET = "ElasticNet"
ELASTIC_NET_CV = "ElasticNetCV"
LIGHT_GBM_REGRESSOR = "LightGBMRegressor"
LINEAR_REGRESSION = "LinearRegression"
class StorageAuthType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
MSI = "MSI"
CONNECTION_STRING = "ConnectionString"
SAS = "SAS"
class StoredProcedureParameterType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
STRING = "String"
INT = "Int"
DECIMAL = "Decimal"
GUID = "Guid"
BOOLEAN = "Boolean"
DATE = "Date"
class SuccessfulCommandReturnCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ZERO = "Zero"
ZERO_OR_GREATER = "ZeroOrGreater"
class TabularTrainingMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DISTRIBUTED = "Distributed"
NON_DISTRIBUTED = "NonDistributed"
AUTO = "Auto"
class TargetAggregationFunction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
SUM = "Sum"
MAX = "Max"
MIN = "Min"
MEAN = "Mean"
class TargetLagsMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTO = "Auto"
CUSTOM = "Custom"
class TargetRollingWindowSizeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTO = "Auto"
CUSTOM = "Custom"
class TaskCreationOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NONE = "None"
PREFER_FAIRNESS = "PreferFairness"
LONG_RUNNING = "LongRunning"
ATTACHED_TO_PARENT = "AttachedToParent"
DENY_CHILD_ATTACH = "DenyChildAttach"
HIDE_SCHEDULER = "HideScheduler"
RUN_CONTINUATIONS_ASYNCHRONOUSLY = "RunContinuationsAsynchronously"
class TaskStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CREATED = "Created"
WAITING_FOR_ACTIVATION = "WaitingForActivation"
WAITING_TO_RUN = "WaitingToRun"
RUNNING = "Running"
WAITING_FOR_CHILDREN_TO_COMPLETE = "WaitingForChildrenToComplete"
RAN_TO_COMPLETION = "RanToCompletion"
CANCELED = "Canceled"
FAULTED = "Faulted"
class TaskStatusCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NOT_STARTED = "NotStarted"
QUEUED = "Queued"
RUNNING = "Running"
FAILED = "Failed"
FINISHED = "Finished"
CANCELED = "Canceled"
PARTIALLY_EXECUTED = "PartiallyExecuted"
BYPASSED = "Bypassed"
class TaskType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CLASSIFICATION = "Classification"
REGRESSION = "Regression"
FORECASTING = "Forecasting"
IMAGE_CLASSIFICATION = "ImageClassification"
IMAGE_CLASSIFICATION_MULTILABEL = "ImageClassificationMultilabel"
IMAGE_OBJECT_DETECTION = "ImageObjectDetection"
IMAGE_INSTANCE_SEGMENTATION = "ImageInstanceSegmentation"
TEXT_CLASSIFICATION = "TextClassification"
TEXT_MULTI_LABELING = "TextMultiLabeling"
TEXT_NER = "TextNER"
TEXT_CLASSIFICATION_MULTILABEL = "TextClassificationMultilabel"
class ToolFuncCallScenario(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Scenario under which a tool function is invoked; values are lowercase snake_case wire strings."""

    GENERATED_BY = "generated_by"
    REVERSE_GENERATED_BY = "reverse_generated_by"
    DYNAMIC_LIST = "dynamic_list"
class ToolState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Support level of a tool (stable, preview, or deprecated)."""

    STABLE = "Stable"
    PREVIEW = "Preview"
    DEPRECATED = "Deprecated"
class ToolType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Kind of tool node in a flow; values are lowercase wire strings (unlike most enums here)."""

    LLM = "llm"
    PYTHON = "python"
    ACTION = "action"
    PROMPT = "prompt"
    CUSTOM_LLM = "custom_llm"
    CSHARP = "csharp"
class TrainingOutputType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
METRICS = "Metrics"
MODEL = "Model"
class TriggerOperationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
CREATE_OR_UPDATE = "CreateOrUpdate"
class TriggerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
RECURRENCE = "Recurrence"
CRON = "Cron"
class UIInputDataDeliveryMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
READ_ONLY_MOUNT = "Read-only mount"
READ_WRITE_MOUNT = "Read-write mount"
DOWNLOAD = "Download"
DIRECT = "Direct"
EVALUATE_MOUNT = "Evaluate mount"
EVALUATE_DOWNLOAD = "Evaluate download"
HDFS = "Hdfs"
class UIScriptLanguageEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NONE = "None"
PYTHON = "Python"
R = "R"
JSON = "Json"
SQL = "Sql"
class UIWidgetTypeEnum(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DEFAULT = "Default"
MODE = "Mode"
COLUMN_PICKER = "ColumnPicker"
CREDENTIAL = "Credential"
SCRIPT = "Script"
COMPUTE_SELECTION = "ComputeSelection"
JSON_EDITOR = "JsonEditor"
SEARCH_SPACE_PARAMETER = "SearchSpaceParameter"
SECTION_TOGGLE = "SectionToggle"
YAML_EDITOR = "YamlEditor"
ENABLE_RUNTIME_SWEEP = "EnableRuntimeSweep"
DATA_STORE_SELECTION = "DataStoreSelection"
INSTANCE_TYPE_SELECTION = "InstanceTypeSelection"
CONNECTION_SELECTION = "ConnectionSelection"
PROMPT_FLOW_CONNECTION_SELECTION = "PromptFlowConnectionSelection"
AZURE_OPEN_AI_DEPLOYMENT_NAME_SELECTION = "AzureOpenAIDeploymentNameSelection"
class UploadState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
UPLOADING = "Uploading"
COMPLETED = "Completed"
CANCELED = "Canceled"
FAILED = "Failed"
class UserType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class UseStl(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
SEASON = "Season"
SEASON_TREND = "SeasonTrend"
class ValidationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class ValueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of a tool input value.

    Mixes two naming styles on the wire: primitive/special types are lowercase
    (``int``, ``prompt_template``) while connection types are PascalCase class
    names (``AzureOpenAIConnection``); the case-insensitive metaclass makes
    lookups tolerant either way.
    """

    # Primitive and structural types (lowercase wire values).
    INT = "int"
    DOUBLE = "double"
    BOOL = "bool"
    STRING = "string"
    SECRET = "secret"
    PROMPT_TEMPLATE = "prompt_template"
    OBJECT = "object"
    LIST = "list"
    # Connection types (PascalCase wire values).
    BING_CONNECTION = "BingConnection"
    OPEN_AI_CONNECTION = "OpenAIConnection"
    AZURE_OPEN_AI_CONNECTION = "AzureOpenAIConnection"
    AZURE_CONTENT_MODERATOR_CONNECTION = "AzureContentModeratorConnection"
    CUSTOM_CONNECTION = "CustomConnection"
    AZURE_CONTENT_SAFETY_CONNECTION = "AzureContentSafetyConnection"
    SERP_CONNECTION = "SerpConnection"
    COGNITIVE_SEARCH_CONNECTION = "CognitiveSearchConnection"
    SUBSTRATE_LLM_CONNECTION = "SubstrateLLMConnection"
    PINECONE_CONNECTION = "PineconeConnection"
    QDRANT_CONNECTION = "QdrantConnection"
    WEAVIATE_CONNECTION = "WeaviateConnection"
    # Function-related and file/image inputs.
    FUNCTION_LIST = "function_list"
    FUNCTION_STR = "function_str"
    FORM_RECOGNIZER_CONNECTION = "FormRecognizerConnection"
    FILE_PATH = "file_path"
    IMAGE = "image"
class VmPriority(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
DEDICATED = "Dedicated"
LOWPRIORITY = "Lowpriority"
class WebServiceState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
TRANSITIONING = "Transitioning"
HEALTHY = "Healthy"
UNHEALTHY = "Unhealthy"
FAILED = "Failed"
UNSCHEDULABLE = "Unschedulable"
class Weekday(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
MONDAY = "Monday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
THURSDAY = "Thursday"
FRIDAY = "Friday"
SATURDAY = "Saturday"
SUNDAY = "Sunday"
class WeekDays(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
MONDAY = "Monday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
THURSDAY = "Thursday"
FRIDAY = "Friday"
SATURDAY = "Saturday"
SUNDAY = "Sunday"
class YarnDeployMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NONE = "None"
CLIENT = "Client"
CLUSTER = "Cluster"
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/models/_azure_machine_learning_designer_service_client_enums.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/models/_azure_machine_learning_designer_service_client_enums.py",
"repo_id": "promptflow",
"token_count": 22758
} | 49 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""service_caller.py, module for interacting with the AzureML service."""
import json
import os
import sys
import time
import uuid
from functools import wraps, cached_property
import pydash
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.core.pipeline.policies import RetryPolicy
from promptflow._sdk._telemetry import request_id_context
from promptflow._sdk._telemetry import TelemetryMixin
from promptflow._utils.logger_utils import LoggerFactory
from promptflow.azure._constants._flow import AUTOMATIC_RUNTIME, SESSION_CREATION_TIMEOUT_ENV_VAR
from promptflow.azure._restclient.flow import AzureMachineLearningDesignerServiceClient
from promptflow.azure._utils.gerneral import get_authorization, get_arm_token, get_aml_token
from promptflow.exceptions import UserErrorException, PromptflowException, SystemErrorException
logger = LoggerFactory.get_logger(__name__)
class FlowRequestException(SystemErrorException):
    """Raised when a call to the flow service fails.

    Used by ``_request_wrapper`` to surface HTTP failures with the request id,
    status code, reason and message embedded in the exception text.
    """

    def __init__(self, message, **kwargs):
        # Delegate straight to SystemErrorException; kwargs carry extra error metadata.
        super().__init__(message, **kwargs)
class RequestTelemetryMixin(TelemetryMixin):
    """Mixin that attaches a per-request id and a CLI-origin flag to telemetry events."""

    def __init__(self):
        super().__init__()
        self._refresh_request_id_for_telemetry()
        self._from_cli = False

    def _get_telemetry_values(self, *args, **kwargs):
        """Return the dimensions recorded with every telemetry event."""
        values = dict(request_id=self._request_id, from_cli=self._from_cli)
        return values

    def _set_from_cli_for_telemetry(self):
        """Mark this instance as driven by the CLI (reflected in telemetry)."""
        self._from_cli = True

    def _refresh_request_id_for_telemetry(self):
        """Adopt the request id from the current context, minting a fresh UUID when absent."""
        contextual_id = request_id_context.get()
        self._request_id = contextual_id if contextual_id else str(uuid.uuid4())
def _request_wrapper():
    """Decorator factory for service-call methods on :class:`RequestTelemetryMixin`.

    The returned decorator refreshes the telemetry request id before each call
    and converts ``HttpResponseError`` into :class:`FlowRequestException`,
    embedding the request id so a failure can be correlated with service logs.

    :raises PromptflowException: if applied to a method whose ``self`` is not a
        ``RequestTelemetryMixin`` (programming error, not a service failure).
    """

    def exception_wrapper(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if not isinstance(self, RequestTelemetryMixin):
                raise PromptflowException(f"Wrapped function is not RequestTelemetryMixin, got {type(self)}")
            # Refresh before each request so every call gets its own correlation id.
            self._refresh_request_id_for_telemetry()
            try:
                return func(self, *args, **kwargs)
            except HttpResponseError as e:
                # Chain the original HTTP error explicitly so its traceback and
                # response details survive as __cause__ of the raised exception.
                raise FlowRequestException(
                    f"Calling {func.__name__} failed with request id: {self._request_id} \n"
                    f"Status code: {e.status_code} \n"
                    f"Reason: {e.reason} \n"
                    f"Error message: {e.message} \n"
                ) from e

        return wrapper

    return exception_wrapper
class FlowServiceCaller(RequestTelemetryMixin):
    """FlowServiceCaller.

    Thin client over the AzureML designer/flow REST service; every public method
    delegates to the generated REST client with per-request correlation headers.

    :param workspace: workspace
    :type workspace: Workspace
    :param base_url: base url
    :type base_url: Service URL
    """

    # The default namespace placeholder is used when namespace is None for get_module API.
    DEFAULT_COMPONENT_NAMESPACE_PLACEHOLDER = "-"
    DEFAULT_MODULE_WORKING_MECHANISM = "OutputToDataset"
    DEFAULT_DATATYPE_MECHANISM = "RegisterBuildinDataTypeOnly"
    # Environment variable names used to override endpoints (dev/test scenarios).
    FLOW_CLUSTER_ADDRESS = "FLOW_CLUSTER_ADDRESS"
    WORKSPACE_INDEPENDENT_ENDPOINT_ADDRESS = "WORKSPACE_INDEPENDENT_ENDPOINT_ADDRESS"
    DEFAULT_BASE_URL = "https://{}.api.azureml.ms"
    MASTER_BASE_API = "https://master.api.azureml-test.ms"
    DEFAULT_BASE_REGION = "westus2"
    AML_USE_ARM_TOKEN = "AML_USE_ARM_TOKEN"

    def __init__(self, workspace, credential, operation_scope, base_url=None, region=None, **kwargs):
        """Initializes DesignerServiceCaller."""
        # Guard: instances are expected to be created only through
        # `_FlowServiceCallerFactory.get_instance()` (checked via the caller frame name).
        if "get_instance" != sys._getframe().f_back.f_code.co_name:
            raise UserErrorException(
                "Please use `_FlowServiceCallerFactory.get_instance()` to get service caller "
                "instead of creating a new one."
            )
        super().__init__()
        # self._service_context = workspace.service_context
        if base_url is None:
            # handle vnet scenario, it's discovery url will have workspace id after discovery
            base_url = workspace.discovery_url.split("discovery")[0]
            # for dev test, change base url with environment variable
            base_url = os.environ.get(self.FLOW_CLUSTER_ADDRESS, default=base_url)
        self._workspace = workspace
        self._operation_scope = operation_scope
        self._service_endpoint = base_url
        self._credential = credential
        retry_policy = RetryPolicy()
        # stop retry 500 since it will cause 409 for run creation scenario
        retry_policy._retry_on_status_codes.remove(500)
        self.caller = AzureMachineLearningDesignerServiceClient(base_url=base_url, retry_policy=retry_policy, **kwargs)
    def _get_headers(self):
        """Build common request headers: authorization plus the client request id for correlation."""
        custom_header = {
            "Authorization": get_authorization(credential=self._credential),
            "x-ms-client-request-id": self._request_id,
        }
        return custom_header

    def _set_headers_with_user_aml_token(self, headers):
        # Attach the user's AML token so downstream services can act on the user's behalf.
        aml_token = get_aml_token(credential=self._credential)
        headers["aml-user-token"] = aml_token

    def _get_user_identity_info(self):
        """Return ``(object_id, tenant_id)`` decoded from the user's ARM token.

        The token signature is intentionally not verified — only the claims are read.
        """
        import jwt

        token = get_arm_token(credential=self._credential)
        decoded_token = jwt.decode(token, options={"verify_signature": False})
        user_object_id, user_tenant_id = decoded_token["oid"], decoded_token["tid"]
        return user_object_id, user_tenant_id
@cached_property
def _common_azure_url_pattern(self):
operation_scope = self._operation_scope
pattern = (
f"/subscriptions/{operation_scope.subscription_id}"
f"/resourceGroups/{operation_scope.resource_group_name}"
f"/providers/Microsoft.MachineLearningServices"
f"/workspaces/{operation_scope.workspace_name}"
)
return pattern
    @_request_wrapper()
    def create_flow(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        experiment_id=None,  # type: Optional[str]
        body=None,  # type: Optional["_models.CreateFlowRequest"]
        **kwargs,  # type: Any
    ):
        """Create a flow in the workspace."""
        headers = self._get_headers()
        return self.caller.flows.create_flow(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_id=experiment_id,
            body=body,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def create_component_from_flow(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        body=None,  # type: Optional["_models.LoadFlowAsComponentRequest"]
        **kwargs,  # type: Any
    ):
        """Register a flow as a component; if it already exists, return the existing component's ARM id."""
        headers = self._get_headers()
        try:
            return self.caller.flows.load_as_component(
                subscription_id=subscription_id,
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                body=body,
                headers=headers,
                **kwargs,
            )
        except ResourceExistsError:
            # Same name/version already registered — construct its ARM id instead of failing.
            return (
                f"/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}"
                f"/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}"
                f"/components/{body.component_name}/versions/{body.component_version}"
            )

    @_request_wrapper()
    def list_flows(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        experiment_id=None,  # type: Optional[str]
        owned_only=None,  # type: Optional[bool]
        flow_type=None,  # type: Optional[Union[str, "_models.FlowType"]]
        list_view_type=None,  # type: Optional[Union[str, "_models.ListViewType"]]
        **kwargs,  # type: Any
    ):
        """List flows in the workspace, optionally filtered by owner, type and view type."""
        headers = self._get_headers()
        return self.caller.flows.list_flows(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_id=experiment_id,
            owned_only=owned_only,
            flow_type=flow_type,
            list_view_type=list_view_type,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def submit_flow(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        experiment_id,  # type: str
        endpoint_name=None,  # type: Optional[str]
        body=None,  # type: Optional["_models.SubmitFlowRequest"]
        **kwargs,  # type: Any
    ):
        """Submit a flow for execution under the given experiment."""
        headers = self._get_headers()
        return self.caller.flows.submit_flow(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_id=experiment_id,
            endpoint_name=endpoint_name,
            body=body,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def get_flow(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        flow_id,  # type: str
        experiment_id,  # type: str
        **kwargs,  # type: Any
    ):
        """Get a flow by id."""
        headers = self._get_headers()
        return self.caller.flows.get_flow(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            experiment_id=experiment_id,
            flow_id=flow_id,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def get_flow_run(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        flow_run_id,  # type: str
        **kwargs,  # type: Any
    ):
        """Get flow run."""
        headers = self._get_headers()
        return self.caller.bulk_runs.get_flow_run_info(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            flow_run_id=flow_run_id,
            headers=headers,
            **kwargs,
        )
    @_request_wrapper()
    def create_connection(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        connection_name,  # type: str
        body=None,  # type: Optional["_models.CreateOrUpdateConnectionRequest"]
        **kwargs,  # type: Any
    ):
        """Create a workspace connection with the given name."""
        headers = self._get_headers()
        return self.caller.connections.create_connection(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            connection_name=connection_name,
            body=body,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def update_connection(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        connection_name,  # type: str
        body=None,  # type: Optional["_models.CreateOrUpdateConnectionRequestDto"]
        **kwargs,  # type: Any
    ):
        """Update an existing workspace connection."""
        headers = self._get_headers()
        return self.caller.connections.update_connection(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            connection_name=connection_name,
            body=body,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def get_connection(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        connection_name,  # type: str
        **kwargs,  # type: Any
    ):
        """Get a workspace connection by name."""
        headers = self._get_headers()
        return self.caller.connections.get_connection(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            connection_name=connection_name,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def delete_connection(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        connection_name,  # type: str
        **kwargs,  # type: Any
    ):
        """Delete a workspace connection by name."""
        headers = self._get_headers()
        return self.caller.connections.delete_connection(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            connection_name=connection_name,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def list_connections(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs,  # type: Any
    ):
        """List all connections in the workspace."""
        headers = self._get_headers()
        return self.caller.connections.list_connections(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def list_connection_specs(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs,  # type: Any
    ):
        """List the connection specifications (schemas) supported by the workspace."""
        headers = self._get_headers()
        return self.caller.connections.list_connection_specs(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            headers=headers,
            **kwargs,
        )
    @_request_wrapper()
    def submit_bulk_run(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        body=None,  # type: Optional["_models.SubmitBulkRunRequest"]
        **kwargs,  # type: Any
    ):
        """submit_bulk_run.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param body:
        :type body: ~flow.models.SubmitBulkRunRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: str, or the result of cls(response)
        :rtype: str
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        headers = self._get_headers()
        # pass user aml token to flow run submission
        self._set_headers_with_user_aml_token(headers)
        return self.caller.bulk_runs.submit_bulk_run(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            headers=headers,
            body=body,
            **kwargs,
        )
@_request_wrapper()
def create_flow_session(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
session_id, # type: str
body, # type: Optional["_models.CreateFlowSessionRequest"]
**kwargs, # type: Any
):
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from promptflow.azure._restclient.flow.operations._flow_sessions_operations import (
build_create_flow_session_request,
_convert_request,
_models,
)
from promptflow.azure._constants._flow import SESSION_CREATION_TIMEOUT_SECONDS
from promptflow.azure._restclient.flow.models import SetupFlowSessionAction
headers = self._get_headers()
# pass user aml token to session create so user don't need to do authentication again in CI
self._set_headers_with_user_aml_token(headers)
# did not call self.caller.flow_sessions.create_flow_session because it does not support return headers
cls = kwargs.pop("cls", None) # type: ClsType[Any]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = self.caller.flow_sessions._serialize.body(body, "CreateFlowSessionRequest")
request = build_create_flow_session_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
session_id=session_id,
content_type=content_type,
json=_json,
template_url=self.caller.flow_sessions.create_flow_session.metadata["url"],
headers=headers,
)
request = _convert_request(request)
request.url = self.caller.flow_sessions._client.format_url(request.url)
pipeline_response = self.caller.flow_sessions._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self.caller.flow_sessions._deserialize.failsafe_deserialize(
_models.ErrorResponse, pipeline_response
)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
return
action = body.action or SetupFlowSessionAction.INSTALL.value
if action == SetupFlowSessionAction.INSTALL.value:
action = "creation"
else:
action = "reset"
logger.info(f"Start polling until session {action} is completed...")
# start polling status here.
if "azure-asyncoperation" not in response.headers:
raise FlowRequestException(
"No polling url found in response headers. "
f"Request id: {headers['x-ms-client-request-id']}. "
f"Response headers: {response.headers}."
)
polling_url = response.headers["azure-asyncoperation"]
time_run = 0
sleep_period = 5
status = None
timeout_seconds = SESSION_CREATION_TIMEOUT_SECONDS
# polling timeout, if user set SESSION_CREATION_TIMEOUT_SECONDS in environment var, use it
if os.environ.get(SESSION_CREATION_TIMEOUT_ENV_VAR):
try:
timeout_seconds = float(os.environ.get(SESSION_CREATION_TIMEOUT_ENV_VAR))
except ValueError:
raise UserErrorException(
"Environment variable {} with value {} set but failed to parse. "
"Please reset the value to a number.".format(
SESSION_CREATION_TIMEOUT_ENV_VAR, os.environ.get(SESSION_CREATION_TIMEOUT_ENV_VAR)
)
)
# InProgress is only known non-terminal status for now.
while status in [None, "InProgress"]:
if time_run + sleep_period > timeout_seconds:
message = (
f"Polling timeout for session {session_id} {action} "
f"for {AUTOMATIC_RUNTIME} after {timeout_seconds} seconds.\n"
f"To proceed the {action} for {AUTOMATIC_RUNTIME}, you can retry using the same flow, "
"and we will continue polling status of previous session. \n"
)
raise Exception(message)
time_run += sleep_period
time.sleep(sleep_period)
response = self.poll_operation_status(url=polling_url, **kwargs)
status = response["status"]
logger.debug(f"Current polling status: {status}")
if time_run % 30 == 0:
# print the message every 30 seconds to avoid users feeling stuck during the operation
print(f"Waiting for session {action}, current status: {status}")
else:
logger.debug(f"Waiting for session {action}, current status: {status}")
if status == "Succeeded":
error_msg = pydash.get(response, "error.message", None)
if error_msg:
logger.warning(
f"Session {action} finished with status {status}. "
f"But there are warnings when installing the packages: {error_msg}."
)
else:
logger.info(f"Session {action} finished with status {status}.")
else:
# refine response error message
try:
response["error"]["message"] = json.loads(response["error"]["message"])
except Exception:
pass
raise FlowRequestException(
f"Session {action} failed for {session_id}. \n"
f"Session {action} status: {status}. \n"
f"Request id: {headers['x-ms-client-request-id']}. \n"
f"{json.dumps(response, indent=2)}."
)
    @_request_wrapper()
    def poll_operation_status(
        self, url, **kwargs  # type: Any
    ):
        """Poll a long-running-operation status url and return the deserialized response body.

        :raises ~azure.core.exceptions.HttpResponseError: if the service returns non-200.
        :raises FlowRequestException: if the response body has no "status" field.
        """
        from azure.core.rest import HttpRequest
        from azure.core.exceptions import (
            ClientAuthenticationError,
            HttpResponseError,
            ResourceExistsError,
            ResourceNotFoundError,
            map_error,
        )

        from promptflow.azure._restclient.flow.operations._flow_sessions_operations import _models

        headers = self._get_headers()
        request = HttpRequest(method="GET", url=url, headers=headers, **kwargs)
        pipeline_response = self.caller.flow_sessions._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self.caller.flow_sessions._deserialize.failsafe_deserialize(
                _models.ErrorResponse, pipeline_response
            )
            raise HttpResponseError(response=response, model=error)
        deserialized = self.caller.flow_sessions._deserialize("object", pipeline_response)
        # A well-formed polling payload always carries a "status" field.
        if "status" not in deserialized:
            raise FlowRequestException(
                f"Status not found in response. Request id: {headers['x-ms-client-request-id']}. "
                f"Response headers: {response.headers}."
            )
        return deserialized
    @_request_wrapper()
    def get_child_runs(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        flow_run_id,  # type: str
        index=None,  # type: Optional[int]
        start_index=None,  # type: Optional[int]
        end_index=None,  # type: Optional[int]
        **kwargs,  # type: Any
    ):
        """Get child runs of a flow run.

        ``index`` selects a single line; ``start_index``/``end_index`` select a range.
        """
        headers = self._get_headers()
        return self.caller.bulk_runs.get_flow_child_runs(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            flow_run_id=flow_run_id,
            index=index,
            start_index=start_index,
            end_index=end_index,
            headers=headers,
            **kwargs,
        )

    @_request_wrapper()
    def cancel_flow_run(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        flow_run_id,  # type: str
        **kwargs,  # type: Any
    ):
        """Cancel a flow run."""
        headers = self._get_headers()
        return self.caller.bulk_runs.cancel_flow_run(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            flow_run_id=flow_run_id,
            headers=headers,
            **kwargs,
        )
| promptflow/src/promptflow/promptflow/azure/_restclient/flow_service_caller.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow_service_caller.py",
"repo_id": "promptflow",
"token_count": 11276
} | 50 |
# Component spec template used when registering a flow as an AzureML command component.
$schema: https://azuremlschemas.azureedge.net/latest/commandComponent.schema.json
# will be changed to flow to support parallelism
type: command
outputs:
  output:
    # PRS team will always aggregate all the outputs into a single file under this folder for now
    type: uri_folder
| promptflow/src/promptflow/promptflow/azure/resources/component_spec_template.yaml/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/resources/component_spec_template.yaml",
"repo_id": "promptflow",
"token_count": 85
} | 51 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from enum import Enum
class RunMode(str, Enum):
    """An enumeration of possible run modes."""

    Test = "Test"
    SingleNode = "SingleNode"
    Batch = "Batch"

    @classmethod
    def parse(cls, value: str):
        """Parse a string to a RunMode enum value.

        Unrecognized (but string-typed) values fall back to ``RunMode.Test``.

        :param value: The string to parse.
        :type value: str
        :return: The corresponding RunMode enum value.
        :rtype: ~promptflow.contracts.run_mode.RunMode
        :raises ValueError: If the value is not a string.
        """
        if not isinstance(value, str):
            raise ValueError(f"Invalid value type to parse: {type(value)}")
        known_modes = {mode.value: mode for mode in (cls.SingleNode, cls.Batch)}
        return known_modes.get(value, cls.Test)
| promptflow/src/promptflow/promptflow/contracts/run_mode.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/contracts/run_mode.py",
"repo_id": "promptflow",
"token_count": 374
} | 52 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass
from typing import Any, Dict, Mapping
from promptflow.contracts.run_info import FlowRunInfo, RunInfo
@dataclass
class LineResult:
    """The result of a line process."""

    output: Mapping[str, Any]  # The output of the line.
    # The node output values to be used as aggregation inputs, if no aggregation node, it will be empty.
    aggregation_inputs: Mapping[str, Any]
    run_info: FlowRunInfo  # The run info of the line.
    node_run_infos: Mapping[str, RunInfo]  # The run info of the nodes in the line.

    @staticmethod
    def deserialize(data: dict) -> "LineResult":
        """Deserialize the LineResult from a dict.

        Missing ``aggregation_inputs``/``node_run_infos`` keys default to empty mappings.
        """
        return LineResult(
            output=data.get("output"),
            aggregation_inputs=data.get("aggregation_inputs", {}),
            run_info=FlowRunInfo.deserialize(data.get("run_info")),
            node_run_infos={k: RunInfo.deserialize(v) for k, v in data.get("node_run_infos", {}).items()},
        )
@dataclass
class AggregationResult:
    """The result when running aggregation nodes in the flow."""

    output: Mapping[str, Any]  # The output of the aggregation nodes in the flow.
    metrics: Dict[str, Any]  # The metrics generated by the aggregation.
    node_run_infos: Mapping[str, RunInfo]  # The run info of the aggregation nodes.

    @staticmethod
    def deserialize(data: dict) -> "AggregationResult":
        """Deserialize the AggregationResult from a dict.

        Missing ``output``/``metrics`` keys default to None.
        """
        return AggregationResult(
            output=data.get("output", None),
            metrics=data.get("metrics", None),
            node_run_infos={k: RunInfo.deserialize(v) for k, v in data.get("node_run_infos", {}).items()},
        )
| promptflow/src/promptflow/promptflow/executor/_result.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_result.py",
"repo_id": "promptflow",
"token_count": 669
} | 53 |
from pathlib import Path
# Repository-relative paths used by the test suites.
PROMOTFLOW_ROOT = Path(__file__).parent.parent
RUNTIME_TEST_CONFIGS_ROOT = Path(PROMOTFLOW_ROOT / "tests/test_configs/runtime")
EXECUTOR_REQUESTS_ROOT = Path(PROMOTFLOW_ROOT / "tests/test_configs/executor_api_requests")
MODEL_ROOT = Path(PROMOTFLOW_ROOT / "tests/test_configs/e2e_samples")
# Local secrets/environment files (resolved to absolute POSIX-style paths).
CONNECTION_FILE = (PROMOTFLOW_ROOT / "connections.json").resolve().absolute().as_posix()
ENV_FILE = (PROMOTFLOW_ROOT / ".env").resolve().absolute().as_posix()

# below constants are used for pfazure and global config tests
DEFAULT_SUBSCRIPTION_ID = "96aede12-2f73-41cb-b983-6d11a904839b"
DEFAULT_RESOURCE_GROUP_NAME = "promptflow"
DEFAULT_WORKSPACE_NAME = "promptflow-eastus2euap"
DEFAULT_RUNTIME_NAME = "test-runtime-ci"
DEFAULT_REGISTRY_NAME = "promptflow-preview"
| promptflow/src/promptflow/tests/_constants.py/0 | {
"file_path": "promptflow/src/promptflow/tests/_constants.py",
"repo_id": "promptflow",
"token_count": 322
} | 54 |
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow.batch import BatchEngine
from promptflow.batch._result import BatchResult
from ..utils import get_flow_folder, get_flow_inputs_file, get_yaml_file
@pytest.mark.usefixtures("use_secrets_config_file", "dev_connections")
@pytest.mark.e2etest
class TestLangchain:
    """E2E tests running batch flows that use langchain / OpenAI APIs."""

    @pytest.mark.parametrize(
        "flow_folder, inputs_mapping",
        [
            ("flow_with_langchain_traces", {"question": "${data.question}"}),
            ("openai_chat_api_flow", {"question": "${data.question}", "chat_history": "${data.chat_history}"}),
            ("openai_completion_api_flow", {"prompt": "${data.prompt}"}),
        ],
    )
    def test_batch_with_langchain(self, flow_folder, inputs_mapping, dev_connections):
        """Run each flow in batch mode and verify all lines complete and tokens are consumed."""
        batch_engine = BatchEngine(
            get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=dev_connections
        )
        input_dirs = {"data": get_flow_inputs_file(flow_folder)}
        output_dir = Path(mkdtemp())
        batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
        assert isinstance(batch_results, BatchResult)
        # Every input line must complete, and token usage must be recorded (LLM was called).
        assert batch_results.total_lines == batch_results.completed_lines
        assert batch_results.system_metrics.total_tokens > 0
| promptflow/src/promptflow/tests/executor/e2etests/test_langchain.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/e2etests/test_langchain.py",
"repo_id": "promptflow",
"token_count": 530
} | 55 |
# Test flow: a single python node backed by the package tool `tool_with_connection`,
# wired to the connection named `test_conn`.
inputs: {}
outputs: {}
nodes:
- name: tool_with_conn
  type: python
  source:
    type: package
    tool: tool_with_connection
  inputs:
    conn: test_conn
| promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 63
} | 56 |
import inspect
import pytest
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._core.tracer import Tracer, _create_trace_from_function_call, _traced, trace
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.trace import Trace, TraceType
def generator():
    """Yield 0, 1, 2 — a minimal generator fixture for tracing tests."""
    yield from range(3)
@pytest.mark.unittest
class TestTracer:
    """Unit tests for Tracer activation/deactivation, push/pop bookkeeping, and serialization."""

    def test_end_tracing(self):
        # Activate the tracer in the current context
        tracer = Tracer("test_run_id")
        tracer._activate_in_context()

        # Assert that there is an active tracer instance
        assert Tracer.active_instance() is tracer

        # End tracing and get the traces as a JSON string
        traces = Tracer.end_tracing()

        # Assert that the traces is a list
        assert isinstance(traces, list)

        # Assert that there is no active tracer instance after ending tracing
        assert Tracer.active_instance() is None

        # Test the raise_ex argument of the end_tracing method
        with pytest.raises(Exception):
            # Try to end tracing again with raise_ex=True
            Tracer.end_tracing(raise_ex=True)

        # Try to end tracing again with raise_ex=False
        traces = Tracer.end_tracing(raise_ex=False)

        # Assert that the traces are empty
        assert not traces

    def test_start_tracing(self):
        # Assert that there is no active tracer instance before starting tracing
        assert Tracer.active_instance() is None

        # Start tracing with a mock run_id
        Tracer.start_tracing("test_run_id")

        # Assert that there is an active tracer instance after starting tracing
        assert Tracer.active_instance() is not None

        # Assert that the active tracer instance has the correct run_id
        assert Tracer.active_instance()._run_id == "test_run_id"

        Tracer.end_tracing()

    def test_push_pop(self, caplog):
        # test the push method with a single trace
        Tracer.start_tracing("test_run_id")
        tracer = Tracer.active_instance()

        trace1 = Trace("test1", inputs=[1, 2, 3], type=TraceType.TOOL)
        trace2 = Trace("test2", inputs=[4, 5, 6], type=TraceType.TOOL)

        Tracer.push(trace1)
        assert tracer._traces == [trace1]
        assert tracer._id_to_trace == {trace1.id: trace1}

        # test the push method with a nested trace
        Tracer.push(trace2)
        assert tracer._traces == [trace1]  # check if the tracer still has only the first trace in its _traces list
        # check if the tracer has both traces in its trace dict
        assert tracer._id_to_trace == {trace1.id: trace1, trace2.id: trace2}
        assert trace1.children == [trace2]  # check if the first trace has the second trace as its child

        # test the pop method with generator output
        tool_output = generator()
        error1 = ValueError("something went wrong")
        assert tracer._get_current_trace() is trace2
        output = Tracer.pop(output=tool_output, error=error1)

        # check output iterator
        for i in range(3):
            assert next(output) == i

        # generator outputs are wrapped so they can be traced lazily
        assert isinstance(trace2.output, GeneratorProxy)
        assert trace2.error == {
            "message": str(error1),
            "type": type(error1).__qualname__,
        }
        assert tracer._get_current_trace() is trace1

        # test the pop method with no arguments
        output = Tracer.pop()

        assert tracer._get_current_trace() is None
        assert trace1.output is None
        assert output is None

        Tracer.end_tracing()

        # test the push method with no active tracer
        Tracer.push(trace1)
        # assert that the warning message is logged
        assert "Try to push trace but no active tracer in current context." in caplog.text

    def test_unserializable_obj_to_serializable(self):
        # assert that the function returns a str object for unserializable objects
        assert Tracer.to_serializable(generator) == str(generator)

    @pytest.mark.parametrize("obj", [({"name": "Alice", "age": 25}), ([1, 2, 3]), (GeneratorProxy(generator())), (42)])
    def test_to_serializable(self, obj):
        # already-serializable objects (and GeneratorProxy) pass through unchanged
        assert Tracer.to_serializable(obj) == obj
def func_with_no_parameters():
    # Zero-argument fixture: trace inputs should be an empty dict.
    pass


def func_with_args_and_kwargs(arg1, arg2=None, *, kwarg1=None, kwarg2=None):
    # Mixed positional/keyword-only signature used to exercise input capture.
    _ = (arg1, arg2, kwarg1, kwarg2)


async def func_with_args_and_kwargs_async(arg1, arg2=None, *, kwarg1=None, kwarg2=None):
    # Async variant of the mixed-signature fixture.
    _ = (arg1, arg2, kwarg1, kwarg2)


def func_with_connection_parameter(a: int, conn: AzureOpenAIConnection):
    # Connection arguments should be serialized as their type name in traces.
    _ = (a, conn)


class MyClass:
    # Used to verify the class name appears in trace names and `self` is excluded from inputs.
    def my_method(self, a: int):
        _ = a
@pytest.mark.unittest
class TestCreateTraceFromFunctionCall:
    """This class tests the `_create_trace_from_function_call` function."""

    def test_basic_fields_are_filled_and_others_are_not(self):
        trace = _create_trace_from_function_call(func_with_no_parameters)

        # These fields should be filled in this method call.
        assert trace.name == "func_with_no_parameters"
        assert trace.type == TraceType.FUNCTION
        assert trace.inputs == {}
        # start_time should be a timestamp, which is a float value currently.
        assert isinstance(trace.start_time, float)

        # These should be left empty in this method call.
        # They will be filled by the tracer later.
        assert trace.output is None
        assert trace.end_time is None
        assert trace.children == []
        assert trace.error is None

    def test_basic_fields_are_filled_for_async_functions(self):
        trace = _create_trace_from_function_call(
            func_with_args_and_kwargs_async, args=[1, 2], kwargs={"kwarg1": 3, "kwarg2": 4}
        )
        assert trace.name == "func_with_args_and_kwargs_async"
        assert trace.type == TraceType.FUNCTION
        assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}

    def test_trace_name_should_contain_class_name_for_class_methods(self):
        obj = MyClass()
        trace = _create_trace_from_function_call(obj.my_method, args=[obj, 1])
        assert trace.name == "MyClass.my_method"

    def test_trace_type_can_be_set_correctly(self):
        trace = _create_trace_from_function_call(func_with_no_parameters, trace_type=TraceType.TOOL)
        assert trace.type == TraceType.TOOL

    def test_args_and_kwargs_are_filled_correctly(self):
        trace = _create_trace_from_function_call(
            func_with_args_and_kwargs, args=[1, 2], kwargs={"kwarg1": 3, "kwarg2": 4}
        )
        assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}

    def test_args_called_with_name_should_be_filled_correctly(self):
        trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1], kwargs={"arg2": 2, "kwarg2": 4})
        assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg2": 4}

    def test_kwargs_called_without_name_should_be_filled_correctly(self):
        trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1, 2, 3], kwargs={"kwarg2": 4})
        assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}

    def test_empty_args_should_be_excluded_from_inputs(self):
        trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1])
        assert trace.inputs == {"arg1": 1}

    def test_empty_kwargs_should_be_excluded_from_inputs(self):
        trace = _create_trace_from_function_call(func_with_args_and_kwargs, kwargs={"kwarg1": 1})
        assert trace.inputs == {"kwarg1": 1}
        trace = _create_trace_from_function_call(func_with_args_and_kwargs, kwargs={"kwarg2": 2})
        assert trace.inputs == {"kwarg2": 2}

    def test_args_and_kwargs_should_be_filled_in_called_order(self):
        trace = _create_trace_from_function_call(
            func_with_args_and_kwargs, args=[1, 2], kwargs={"kwarg2": 4, "kwarg1": 3}
        )
        assert list(trace.inputs.keys()) == ["arg1", "arg2", "kwarg2", "kwarg1"]

    def test_connections_should_be_serialized(self):
        conn = AzureOpenAIConnection("test_name", "test_secret")
        trace = _create_trace_from_function_call(func_with_connection_parameter, args=[1, conn])
        # connections are serialized as their type name, never their secrets
        assert trace.inputs == {"a": 1, "conn": "AzureOpenAIConnection"}

    def test_self_arg_should_be_excluded_from_inputs(self):
        obj = MyClass()
        trace = _create_trace_from_function_call(obj.my_method, args=[1])
        assert trace.inputs == {"a": 1}
def sync_func(a: int):
    # Identity function: simplest traceable sync callable.
    return a


async def async_func(a: int):
    # Identity function: simplest traceable async callable.
    return a


def sync_error_func(a: int):
    # Raises ZeroDivisionError to exercise error capture in traces.
    a / 0


async def async_error_func(a: int):
    # Async variant that raises ZeroDivisionError.
    a / 0
@pytest.mark.unittest
class TestTraced:
    """This class tests the `_traced` function."""

    def test_traced_sync_func_should_be_a_sync_func(self):
        assert inspect.iscoroutinefunction(_traced(sync_func)) is False

    def test_traced_async_func_should_be_an_async_func(self):
        assert inspect.iscoroutinefunction(_traced(async_func)) is True

    @pytest.mark.parametrize("func", [sync_func, async_func])
    def test_original_function_and_wrapped_function_should_have_same_name(self, func):
        traced_func = _traced(func)
        assert traced_func.__name__ == func.__name__

    @pytest.mark.parametrize("func", [sync_func, async_func])
    def test_original_function_and_wrapped_function_attributes_are_set(self, func):
        # the wrapper keeps a reference to the original for unwrapping
        traced_func = _traced(func)
        assert getattr(traced_func, "__original_function") == func

    @pytest.mark.asyncio
    @pytest.mark.parametrize("func", [sync_func, async_func])
    async def test_trace_is_not_generated_when_tracer_is_not_active(self, func):
        # Do not call Tracer.start_tracing() here
        traced_func = _traced(func)
        if inspect.iscoroutinefunction(traced_func):
            result = await traced_func(1)
        else:
            result = traced_func(1)

        # Check the result is expected
        assert result == 1

        # Check the generated trace is not generated
        traces = Tracer.end_tracing()
        assert len(traces) == 0

    @pytest.mark.asyncio
    @pytest.mark.parametrize("func", [sync_func, async_func])
    async def test_trace_is_generated_when_tracer_is_active(self, func):
        Tracer.start_tracing("test_run_id")

        traced_func = _traced(func)
        if inspect.iscoroutinefunction(traced_func):
            result = await traced_func(1)
        else:
            result = traced_func(1)

        # Check the result is expected
        assert result == 1

        traces = Tracer.end_tracing()

        # Check the generated trace is expected
        assert len(traces) == 1
        trace = traces[0]
        assert trace["name"] == func.__qualname__
        assert trace["type"] == TraceType.FUNCTION
        assert trace["inputs"] == {"a": 1}
        assert trace["output"] == 1
        assert trace["error"] is None
        assert trace["children"] == []
        assert isinstance(trace["start_time"], float)
        assert isinstance(trace["end_time"], float)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("func", [sync_error_func, async_error_func])
    async def test_trace_is_generated_when_errors_occurred(self, func):
        Tracer.start_tracing("test_run_id")

        traced_func = _traced(func)
        with pytest.raises(ZeroDivisionError):
            if inspect.iscoroutinefunction(traced_func):
                await traced_func(1)
            else:
                traced_func(1)

        traces = Tracer.end_tracing()

        # Check the generated trace is expected
        assert len(traces) == 1
        trace = traces[0]
        assert trace["name"] == func.__qualname__
        assert trace["type"] == TraceType.FUNCTION
        assert trace["inputs"] == {"a": 1}
        assert trace["output"] is None
        assert trace["error"] == {"message": "division by zero", "type": "ZeroDivisionError"}
        assert trace["children"] == []
        assert isinstance(trace["start_time"], float)
        assert isinstance(trace["end_time"], float)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("func", [sync_func, async_func])
    async def test_trace_type_can_be_set_correctly(self, func):
        Tracer.start_tracing("test_run_id")

        traced_func = _traced(func, trace_type=TraceType.TOOL)
        if inspect.iscoroutinefunction(traced_func):
            result = await traced_func(1)
        else:
            result = traced_func(1)
        assert result == 1

        traces = Tracer.end_tracing()

        # Check the generated trace is expected
        assert len(traces) == 1
        trace = traces[0]
        assert trace["name"] == func.__qualname__
        assert trace["type"] == TraceType.TOOL
# Simple @trace-decorated tools (sync and async) exercised by TestTrace below.
@trace
def my_tool(a: int):
    return a
@trace
async def my_tool_async(a: int):
    return a
@pytest.mark.unittest
class TestTrace:
    """This class tests `trace` function."""
    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "func",
        [
            my_tool,
            my_tool_async,
        ],
    )
    async def test_traces_are_created_correctly(self, func):
        # The @trace decorator should behave like _traced with default settings:
        # one FUNCTION trace per call with inputs/output/timestamps recorded.
        Tracer.start_tracing("test_run_id")
        if inspect.iscoroutinefunction(func):
            result = await func(1)
        else:
            result = func(1)
        assert result == 1
        traces = Tracer.end_tracing()
        assert len(traces) == 1
        trace = traces[0]
        assert trace["name"] == func.__qualname__
        assert trace["type"] == TraceType.FUNCTION
        assert trace["inputs"] == {"a": 1}
        assert trace["output"] == 1
        assert trace["error"] is None
        assert trace["children"] == []
        assert isinstance(trace["start_time"], float)
        assert isinstance(trace["end_time"], float)
| promptflow/src/promptflow/tests/executor/unittests/_core/test_tracer.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_core/test_tracer.py",
"repo_id": "promptflow",
"token_count": 5794
} | 57 |
import json
from pathlib import Path
from tempfile import mkdtemp
from typing import Optional
from unittest.mock import AsyncMock, patch
import httpx
import pytest
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch._base_executor_proxy import APIBasedExecutorProxy
from promptflow.batch._errors import ExecutorServiceUnhealthy
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._errors import ConnectionNotFound
from promptflow.storage._run_storage import AbstractRunStorage
from ...mock_execution_server import _get_aggr_result_dict, _get_line_result_dict
@pytest.mark.unittest
class TestAPIBasedExecutorProxy:
    """Unit tests for APIBasedExecutorProxy: line/aggregation execution over HTTP,
    executor startup and health checking, and HTTP response post-processing.

    All network traffic is patched out via unittest.mock (httpx never hits the wire).
    """

    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "has_error",
        [False, True],
    )
    async def test_exec_line_async(self, has_error):
        """A line request yields a LineResult; errors surface in run_info."""
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        run_id = "test_run_id"
        index = 1
        inputs = {"question": "test"}
        with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock:
            line_result_dict = _get_line_result_dict(run_id, index, inputs, has_error=has_error)
            status_code = 400 if has_error else 200
            mock.return_value = httpx.Response(status_code, json=line_result_dict)
            line_result = await mock_executor_proxy.exec_line_async(inputs, index, run_id)
            # BUGFIX: the expected value must be computed before the comparison.
            # The previous `assert a == x if cond else y` parsed as
            # `(a == x) if cond else y`, so for one branch the assert only checked
            # the truthiness of a non-empty literal and could never fail.
            expected_output = {} if has_error else {"answer": "Hello world!"}
            assert line_result.output == expected_output
            assert line_result.run_info.run_id == run_id
            assert line_result.run_info.index == index
            expected_status = Status.Failed if has_error else Status.Completed
            assert line_result.run_info.status == expected_status
            assert line_result.run_info.inputs == inputs
            assert (line_result.run_info.error is not None) == has_error

    @pytest.mark.asyncio
    async def test_exec_aggregation_async(self):
        """Aggregation requests produce metrics and one completed aggregation node run."""
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        run_id = "test_run_id"
        batch_inputs = {"question": ["test", "error"]}
        aggregation_inputs = {"${get_answer.output}": ["Incorrect", "Correct"]}
        with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock:
            aggr_result_dict = _get_aggr_result_dict(run_id, aggregation_inputs)
            mock.return_value = httpx.Response(200, json=aggr_result_dict)
            aggr_result = await mock_executor_proxy.exec_aggregation_async(batch_inputs, aggregation_inputs, run_id)
            assert aggr_result.metrics == {"accuracy": 0.5}
            assert len(aggr_result.node_run_infos) == 1
            assert aggr_result.node_run_infos["aggregation"].flow_run_id == run_id
            assert aggr_result.node_run_infos["aggregation"].inputs == aggregation_inputs
            assert aggr_result.node_run_infos["aggregation"].status == Status.Completed

    @pytest.mark.asyncio
    async def test_ensure_executor_startup_when_no_error(self):
        """A healthy startup never consults the startup error file."""
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
            with patch.object(APIBasedExecutorProxy, "_check_startup_error_from_file") as mock_check_startup_error:
                await mock_executor_proxy.ensure_executor_startup("")
                mock_check_startup_error.assert_not_called()
            mock.assert_called_once()

    @pytest.mark.asyncio
    async def test_ensure_executor_startup_when_not_healthy(self):
        """An unhealthy executor with an *empty* error file re-raises ExecutorServiceUnhealthy."""
        # empty error file
        error_file = Path(mkdtemp()) / "error.json"
        error_file.touch()
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
            mock.side_effect = ExecutorServiceUnhealthy("executor unhealthy")
            with pytest.raises(ExecutorServiceUnhealthy) as ex:
                await mock_executor_proxy.ensure_executor_startup(error_file)
            assert ex.value.message == "executor unhealthy"
            mock.assert_called_once()

    @pytest.mark.asyncio
    async def test_ensure_executor_startup_when_existing_validation_error(self):
        """A recorded validation error in the error file takes precedence over
        the generic unhealthy error and is re-raised as a ValidationException."""
        # prepare the error file
        error_file = Path(mkdtemp()) / "error.json"
        error_message = "Connection 'aoai_conn' not found"
        error_dict = ExceptionPresenter.create(ConnectionNotFound(message=error_message)).to_dict()
        with open(error_file, "w") as file:
            json.dump(error_dict, file, indent=4)
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
            mock.side_effect = ExecutorServiceUnhealthy("executor unhealthy")
            with pytest.raises(ValidationException) as ex:
                await mock_executor_proxy.ensure_executor_startup(error_file)
            assert ex.value.message == error_message
            assert ex.value.target == ErrorTarget.BATCH

    @pytest.mark.asyncio
    async def test_ensure_executor_health_when_healthy(self):
        """A single successful health check is enough; no retries occur."""
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        with patch.object(APIBasedExecutorProxy, "_check_health", return_value=True) as mock:
            await mock_executor_proxy.ensure_executor_health()
            mock.assert_called_once()

    @pytest.mark.asyncio
    async def test_ensure_executor_health_when_unhealthy(self):
        """The health check is retried 20 times before giving up."""
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        with patch.object(APIBasedExecutorProxy, "_check_health", return_value=False) as mock:
            with pytest.raises(ExecutorServiceUnhealthy):
                await mock_executor_proxy.ensure_executor_health()
            assert mock.call_count == 20

    @pytest.mark.asyncio
    async def test_ensure_executor_health_when_not_active(self):
        """If the executor process is no longer active, fail fast without polling health."""
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        with patch.object(APIBasedExecutorProxy, "_check_health", return_value=False) as mock:
            with patch.object(APIBasedExecutorProxy, "_is_executor_active", return_value=False):
                with pytest.raises(ExecutorServiceUnhealthy):
                    await mock_executor_proxy.ensure_executor_health()
            mock.assert_not_called()

    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "mock_value, expected_result",
        [
            (httpx.Response(200), True),
            (httpx.Response(500), False),
            (Exception("error"), False),
        ],
    )
    async def test_check_health(self, mock_value, expected_result):
        """Only a 200 response counts as healthy; errors and 5xx are unhealthy."""
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        with patch("httpx.AsyncClient.get", new_callable=AsyncMock) as mock:
            mock.return_value = mock_value
            assert await mock_executor_proxy._check_health() is expected_result

    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "response, expected_result",
        [
            (
                httpx.Response(200, json={"result": "test"}),
                {"result": "test"},
            ),
            (
                httpx.Response(500, json={"error": "test error"}),
                "test error",
            ),
            (
                httpx.Response(400, json={"detail": "test"}),
                {
                    "message": 'Unexpected error when executing a line, status code: 400, error: {"detail": "test"}',
                    "messageFormat": (
                        "Unexpected error when executing a line, " "status code: {status_code}, error: {error}"
                    ),
                    "messageParameters": {
                        "status_code": "400",
                        "error": '{"detail": "test"}',
                    },
                    "referenceCode": "Unknown",
                    "code": "SystemError",
                    "innerError": {
                        "code": "UnexpectedError",
                        "innerError": None,
                    },
                },
            ),
            (
                httpx.Response(502, text="test"),
                {
                    "message": "Unexpected error when executing a line, status code: 502, error: test",
                    "messageFormat": (
                        "Unexpected error when executing a line, " "status code: {status_code}, error: {error}"
                    ),
                    "messageParameters": {
                        "status_code": "502",
                        "error": "test",
                    },
                    "referenceCode": "Unknown",
                    "code": "SystemError",
                    "innerError": {
                        "code": "UnexpectedError",
                        "innerError": None,
                    },
                },
            ),
        ],
    )
    async def test_process_http_response(self, response, expected_result):
        """200 returns the JSON payload; a 500 with an "error" field returns that
        field; any other unexpected status is wrapped in a SystemError dict."""
        mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
        assert mock_executor_proxy._process_http_response(response) == expected_result
class MockAPIBasedExecutorProxy(APIBasedExecutorProxy):
    """Minimal concrete proxy for the tests above; no real executor service is started."""
    @property
    def api_endpoint(self) -> str:
        # Fixed endpoint; HTTP calls are patched out in tests, so it is never contacted.
        return "http://localhost:8080"
    @classmethod
    async def create(
        cls,
        flow_file: Path,
        working_dir: Optional[Path] = None,
        *,
        connections: Optional[dict] = None,
        storage: Optional[AbstractRunStorage] = None,
        **kwargs,
    ) -> "MockAPIBasedExecutorProxy":
        # All arguments are ignored; tests only need a bare instance.
        return MockAPIBasedExecutorProxy()
| promptflow/src/promptflow/tests/executor/unittests/batch/test_base_executor_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/batch/test_base_executor_proxy.py",
"repo_id": "promptflow",
"token_count": 4370
} | 58 |
import pytest
from promptflow._core.tool_meta_generator import PythonLoadError
from promptflow.exceptions import ErrorTarget
from promptflow.executor._errors import ResolveToolError
def code_with_bug():
    """Deliberately raise ZeroDivisionError ("division by zero") for the tests below."""
    numerator, denominator = 1, 0
    return numerator / denominator
def raise_resolve_tool_error(func, target=None, module=None):
    """Run `func` and re-raise any exception as ResolveToolError for node 'MyTool'.

    When `target` is given it (and `module`) are forwarded to the error,
    otherwise the error is raised with defaults.
    """
    try:
        func()
    except Exception as e:
        if target:
            raise ResolveToolError(node_name="MyTool", target=target, module=module) from e
        raise ResolveToolError(node_name="MyTool") from e
def raise_python_load_error():
    """Raise PythonLoadError chained from a ZeroDivisionError."""
    try:
        code_with_bug()
    except Exception as e:
        raise PythonLoadError(message="Test PythonLoadError.") from e
def test_resolve_tool_error():
    """With a PromptflowException inner error, ResolveToolError adopts its message,
    additional info, error-code chain, and builds the reference code from target/module."""
    with pytest.raises(ResolveToolError) as e:
        raise_resolve_tool_error(raise_python_load_error, ErrorTarget.TOOL, "__pf_main__")
    exception = e.value
    inner_exception = exception.inner_exception
    assert isinstance(inner_exception, PythonLoadError)
    assert exception.message == "Tool load failed in 'MyTool': (PythonLoadError) Test PythonLoadError."
    assert exception.additional_info == inner_exception.additional_info
    assert exception.error_codes == ["UserError", "ToolValidationError", "PythonParsingError", "PythonLoadError"]
    assert exception.reference_code == "Tool/__pf_main__"
def test_resolve_tool_error_with_none_inner():
    """Without a cause, ResolveToolError is a plain SystemError with a short message."""
    with pytest.raises(ResolveToolError) as e:
        raise ResolveToolError(node_name="MyTool")
    exception = e.value
    assert exception.inner_exception is None
    assert exception.message == "Tool load failed in 'MyTool'."
    assert exception.additional_info is None
    assert exception.error_codes == ["SystemError", "ResolveToolError"]
    assert exception.reference_code == "Executor"
def test_resolve_tool_error_with_no_PromptflowException_inner():
    """With a non-Promptflow inner exception, the inner type name is embedded in the
    message and the error-code chain falls back to SystemError."""
    with pytest.raises(ResolveToolError) as e:
        raise_resolve_tool_error(code_with_bug)
    exception = e.value
    assert isinstance(exception.inner_exception, ZeroDivisionError)
    assert exception.message == "Tool load failed in 'MyTool': (ZeroDivisionError) division by zero"
    assert exception.additional_info is None
    assert exception.error_codes == ["SystemError", "ZeroDivisionError"]
    assert exception.reference_code == "Executor"
| promptflow/src/promptflow/tests/executor/unittests/executor/test_errors.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/executor/test_errors.py",
"repo_id": "promptflow",
"token_count": 773
} | 59 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Callable, Optional
from unittest.mock import patch
import jwt
import pytest
from azure.core.exceptions import ResourceNotFoundError
from mock import mock
from pytest_mock import MockerFixture
from promptflow._sdk._constants import FlowType, RunStatus
from promptflow._sdk._utils import ClientUserAgentUtil
from promptflow._sdk.entities import Run
from promptflow.azure import PFClient
from promptflow.azure._entities._flow import Flow
from ._azure_utils import get_cred
from .recording_utilities import (
PFAzureIntegrationTestRecording,
SanitizedValues,
VariableRecorder,
get_created_flow_name_from_flow_path,
get_pf_client_for_replay,
is_live,
is_record,
is_replay,
)
# Paths (relative to the tests working directory) of shared test assets.
FLOWS_DIR = "./tests/test_configs/flows"
EAGER_FLOWS_DIR = "./tests/test_configs/eager_flows"
DATAS_DIR = "./tests/test_configs/datas"
# ARM resource provider and the workspace resource-id template used to build
# workspace-scoped resource ids.
AZUREML_RESOURCE_PROVIDER = "Microsoft.MachineLearningServices"
RESOURCE_ID_FORMAT = "/subscriptions/{}/resourceGroups/{}/providers/{}/workspaces/{}"
def package_scope_in_live_mode() -> str:
    """Decide which pytest fixture scope the shared flow/run fixtures should use.

    Creating flows/runs is slow, so live tests share those fixtures across the
    whole test package. Replay tests locate their recording YAML per test
    function, which forces function scope. Any fixture that requests a
    dynamic-scope fixture must itself use this dynamic scope.
    """
    # Package scope is sufficient for live Azure tests; everything else replays.
    if is_live():
        return "package"
    return "function"
@pytest.fixture(scope=package_scope_in_live_mode())
def user_object_id() -> str:
    # Object id ("oid" claim) of the signed-in user; a sanitized constant in replay mode.
    if is_replay():
        return SanitizedValues.USER_OBJECT_ID
    credential = get_cred()
    access_token = credential.get_token("https://management.azure.com/.default")
    # verify_signature=False: we only need to read claims, not validate the token.
    decoded_token = jwt.decode(access_token.token, options={"verify_signature": False})
    return decoded_token["oid"]
@pytest.fixture(scope=package_scope_in_live_mode())
def tenant_id() -> str:
    # Tenant id ("tid" claim) of the signed-in user; a sanitized constant in replay mode.
    if is_replay():
        return SanitizedValues.TENANT_ID
    credential = get_cred()
    access_token = credential.get_token("https://management.azure.com/.default")
    decoded_token = jwt.decode(access_token.token, options={"verify_signature": False})
    return decoded_token["tid"]
@pytest.fixture(scope=package_scope_in_live_mode())
def ml_client(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
):
    """return a machine learning client using default e2e testing workspace"""
    from azure.ai.ml import MLClient
    return MLClient(
        credential=get_cred(),
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        cloud="AzureCloud",
    )
@pytest.fixture(scope=package_scope_in_live_mode())
def remote_client(subscription_id: str, resource_group_name: str, workspace_name: str):
    # Prompt flow Azure client; replay mode uses a pre-canned client instead of a
    # real credential-backed one.
    from promptflow.azure import PFClient
    if is_replay():
        client = get_pf_client_for_replay()
    else:
        client = PFClient(
            credential=get_cred(),
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
        )
    # Sanity-check the user agent: SDK flavor must be present, CLI flavor absent.
    assert "promptflow-sdk" in ClientUserAgentUtil.get_user_agent()
    assert "promptflow/" not in ClientUserAgentUtil.get_user_agent()
    yield client
@pytest.fixture
def remote_workspace_resource_id(subscription_id: str, resource_group_name: str, workspace_name: str) -> str:
    # ARM resource id of the test workspace, prefixed with the "azureml:" scheme.
    return "azureml:" + RESOURCE_ID_FORMAT.format(
        subscription_id, resource_group_name, AZUREML_RESOURCE_PROVIDER, workspace_name
    )
@pytest.fixture(scope=package_scope_in_live_mode())
def pf(remote_client):
    # do not add annotation here, because PFClient will trigger promptflow.azure imports and break the isolation
    # between azure and non-azure tests
    yield remote_client
@pytest.fixture
def remote_web_classification_data(remote_client):
    # Ensure the webClassification1 dataset exists in the workspace (create on miss).
    from azure.ai.ml.entities import Data
    data_name, data_version = "webClassification1", "1"
    try:
        return remote_client.ml_client.data.get(name=data_name, version=data_version)
    except ResourceNotFoundError:
        return remote_client.ml_client.data.create_or_update(
            Data(name=data_name, version=data_version, path=f"{DATAS_DIR}/webClassification1.jsonl", type="uri_file")
        )
@pytest.fixture(scope="session")
def runtime(runtime_name: str) -> str:
    # Thin alias so tests can request "runtime" instead of "runtime_name".
    return runtime_name
# Repository root (three levels up from this file) and the flow model folder;
# consumers resolve() these before use.
PROMPTFLOW_ROOT = Path(__file__) / "../../.."
MODEL_ROOT = Path(PROMPTFLOW_ROOT / "tests/test_configs/flows")
@pytest.fixture
def flow_serving_client_remote_connection(mocker: MockerFixture, remote_workspace_resource_id):
    # Serving test client whose connection provider is the remote workspace.
    from promptflow._sdk._serving.app import create_app as create_serving_app
    model_path = (Path(MODEL_ROOT) / "basic-with-connection").resolve().absolute().as_posix()
    mocker.patch.dict(os.environ, {"PROMPTFLOW_PROJECT_PATH": model_path})
    mocker.patch.dict(os.environ, {"USER_AGENT": "test-user-agent"})
    app = create_serving_app(
        config={"connection.provider": remote_workspace_resource_id},
        environment_variables={"API_TYPE": "${azure_open_ai_connection.api_type}"},
    )
    app.config.update(
        {
            "TESTING": True,
        }
    )
    return app.test_client()
@pytest.fixture
def flow_serving_client_with_prt_config_env(
    mocker: MockerFixture, subscription_id, resource_group_name, workspace_name
):  # noqa: E501
    # Deployment info supplied via the PRT_CONFIG_OVERRIDE environment variable.
    connections = {
        "PRT_CONFIG_OVERRIDE": f"deployment.subscription_id={subscription_id},"
        f"deployment.resource_group={resource_group_name},"
        f"deployment.workspace_name={workspace_name},"
        "app.port=8088",
    }
    return create_serving_client_with_connections("basic-with-connection", mocker, connections)
@pytest.fixture
def flow_serving_client_with_connection_provider_env(mocker: MockerFixture, remote_workspace_resource_id):
    # Connection provider supplied via PROMPTFLOW_CONNECTION_PROVIDER.
    connections = {"PROMPTFLOW_CONNECTION_PROVIDER": remote_workspace_resource_id}
    return create_serving_client_with_connections("basic-with-connection", mocker, connections)
@pytest.fixture
def flow_serving_client_with_aml_resource_id_env(mocker: MockerFixture, remote_workspace_resource_id):
    # Connection provider derived from AML_DEPLOYMENT_RESOURCE_ID.
    aml_resource_id = "{}/onlineEndpoints/{}/deployments/{}".format(remote_workspace_resource_id, "myendpoint", "blue")
    connections = {"AML_DEPLOYMENT_RESOURCE_ID": aml_resource_id}
    return create_serving_client_with_connections("basic-with-connection", mocker, connections)
@pytest.fixture
def serving_client_with_connection_name_override(mocker: MockerFixture, remote_workspace_resource_id):
    # Override a flow connection by *name* via environment variable.
    connections = {
        "aoai_connection": "azure_open_ai_connection",
        "PROMPTFLOW_CONNECTION_PROVIDER": remote_workspace_resource_id,
    }
    return create_serving_client_with_connections("llm_connection_override", mocker, connections)
@pytest.fixture
def serving_client_with_connection_data_override(mocker: MockerFixture, remote_workspace_resource_id):
    # Override a flow connection with full ARM connection *data* (JSON template).
    model_name = "llm_connection_override"
    model_path = (Path(MODEL_ROOT) / model_name).resolve().absolute()
    # load arm connection template
    connection_arm_template = model_path.joinpath("connection_arm_template.json").read_text()
    connections = {
        "aoai_connection": connection_arm_template,
        "PROMPTFLOW_CONNECTION_PROVIDER": remote_workspace_resource_id,
    }
    return create_serving_client_with_connections(model_name, mocker, connections)
def create_serving_client_with_connections(model_name, mocker: MockerFixture, connections: Optional[dict] = None):
    """Create a Flask test client serving `model_name` with connection env vars applied.

    :param model_name: folder name under MODEL_ROOT containing the flow to serve.
    :param mocker: pytest-mock fixture used to patch os.environ for the test's lifetime.
    :param connections: optional extra environment variables (connection overrides).
    :return: a Flask test client for the serving app (azureml extension type).
    """
    from promptflow._sdk._serving.app import create_app as create_serving_app

    # BUGFIX: the previous signature used a mutable default (`connections: dict = {}`),
    # which is shared across calls and can leak state between tests; use None as the
    # default sentinel instead.
    connections = connections or {}
    model_path = (Path(MODEL_ROOT) / model_name).resolve().absolute().as_posix()
    mocker.patch.dict(os.environ, {"PROMPTFLOW_PROJECT_PATH": model_path})
    mocker.patch.dict(
        os.environ,
        {
            **connections,
        },
    )
    # Set credential to None for azureml extension type
    # As we mock app in github workflow, which do not have managed identity credential
    func = "promptflow._sdk._serving.extension.azureml_extension._get_managed_identity_credential_with_retry"
    with mock.patch(func) as mock_cred_func:
        mock_cred_func.return_value = None
        app = create_serving_app(
            environment_variables={"API_TYPE": "${azure_open_ai_connection.api_type}"},
            extension_type="azureml",
        )
    app.config.update(
        {
            "TESTING": True,
        }
    )
    return app.test_client()
@pytest.fixture(scope=package_scope_in_live_mode())
def variable_recorder() -> VariableRecorder:
    # Records named variables so record and replay runs observe stable values.
    yield VariableRecorder()
@pytest.fixture(scope=package_scope_in_live_mode())
def randstr(variable_recorder: VariableRecorder) -> Callable[[str], str]:
    """Return a "random" UUID."""
    def generate_random_string(variable_name: str) -> str:
        random_string = str(uuid.uuid4())
        if is_live():
            return random_string
        elif is_replay():
            # Replay mode: the variable name itself is the stable placeholder.
            return variable_name
        else:
            # Record mode: persist the generated value so replay can reuse it.
            return variable_recorder.get_or_record_variable(variable_name, random_string)
    return generate_random_string
@pytest.fixture(scope=package_scope_in_live_mode())
def vcr_recording(
    request: pytest.FixtureRequest, user_object_id: str, tenant_id: str, variable_recorder: VariableRecorder
) -> Optional[PFAzureIntegrationTestRecording]:
    """Fixture to record or replay network traffic.
    If the test mode is "live", nothing will happen.
    If the test mode is "record" or "replay", this fixture will locate a YAML (recording) file
    based on the test file, class and function name, write to (record) or read from (replay) the file.
    """
    if is_live():
        yield None
    else:
        recording = PFAzureIntegrationTestRecording.from_test_case(
            test_class=request.cls,
            test_func_name=request.node.name,
            user_object_id=user_object_id,
            tenant_id=tenant_id,
            variable_recorder=variable_recorder,
        )
        recording.enter_vcr()
        # Finalizer guarantees the VCR cassette is closed even if the test fails.
        request.addfinalizer(recording.exit_vcr)
        yield recording
# we expect this fixture only work when running live test without recording
# when recording, we don't want to record any application insights secrets
# when replaying, we also don't need this
@pytest.fixture(autouse=not is_live())
def mock_appinsights_log_handler(mocker: MockerFixture) -> None:
    # Swap the telemetry logger for an inert one in record/replay modes.
    dummy_logger = logging.getLogger("dummy")
    mocker.patch("promptflow._sdk._telemetry.telemetry.get_telemetry_logger", return_value=dummy_logger)
    return
@pytest.fixture
def single_worker_thread_pool() -> None:
    """Mock to use one thread for thread pool executor.
    VCR.py cannot record network traffic in other threads, and we have multi-thread operations
    during resolving the flow. Mock it using one thread to make VCR.py work.
    """
    def single_worker_thread_pool_executor(*args, **kwargs):
        # Ignore the caller's requested worker count; force a single worker.
        return ThreadPoolExecutor(max_workers=1)
    if is_live():
        yield
    else:
        with patch(
            "promptflow.azure.operations._run_operations.ThreadPoolExecutor",
            new=single_worker_thread_pool_executor,
        ):
            yield
@pytest.fixture
def mock_set_headers_with_user_aml_token(mocker: MockerFixture) -> None:
    """Mock set aml-user-token operation.
    There will be requests fetching cloud metadata during retrieving AML token, which will break during replay.
    As the logic comes from azure-ai-ml, changes in Prompt Flow can hardly affect it, mock it here.
    """
    if not is_live():
        mocker.patch(
            "promptflow.azure._restclient.flow_service_caller.FlowServiceCaller._set_headers_with_user_aml_token"
        )
    yield
@pytest.fixture
def mock_get_azure_pf_client(mocker: MockerFixture, remote_client) -> None:
    """Mock PF Azure client to avoid network traffic during replay test."""
    if not is_live():
        # Both CLI entry points resolve the client independently; patch both.
        mocker.patch(
            "promptflow._cli._pf_azure._run._get_azure_pf_client",
            return_value=remote_client,
        )
        mocker.patch(
            "promptflow._cli._pf_azure._flow._get_azure_pf_client",
            return_value=remote_client,
        )
    yield
@pytest.fixture(scope=package_scope_in_live_mode())
def mock_get_user_identity_info(user_object_id: str, tenant_id: str) -> None:
    """Mock get user object id and tenant id, currently used in flow list operation."""
    if not is_live():
        with patch(
            "promptflow.azure._restclient.flow_service_caller.FlowServiceCaller._get_user_identity_info",
            return_value=(user_object_id, tenant_id),
        ):
            yield
    else:
        yield
@pytest.fixture(scope=package_scope_in_live_mode())
def created_flow(pf: PFClient, randstr: Callable[[str], str], variable_recorder: VariableRecorder) -> Flow:
    """Create a flow for test."""
    flow_display_name = randstr("flow_display_name")
    flow_source = FLOWS_DIR + "/simple_hello_world/"
    description = "test flow description"
    tags = {"owner": "sdk-test"}
    result = pf.flows.create_or_update(
        flow=flow_source, display_name=flow_display_name, type=FlowType.STANDARD, description=description, tags=tags
    )
    remote_flow_dag_path = result.path
    # make sure the flow is created successfully
    assert pf.flows._storage_client._check_file_share_file_exist(remote_flow_dag_path) is True
    assert result.display_name == flow_display_name
    assert result.type == FlowType.STANDARD
    assert result.tags == tags
    assert result.path.endswith("flow.dag.yaml")
    # flow in Azure will have different file share name with timestamp
    # and this is a client-side behavior, so we need to sanitize this in recording
    # so extract this during record test
    if is_record():
        flow_name_const = "flow_name"
        flow_name = get_created_flow_name_from_flow_path(result.path)
        variable_recorder.get_or_record_variable(flow_name_const, flow_name)
    yield result
@pytest.fixture(scope=package_scope_in_live_mode())
def created_batch_run_without_llm(pf: PFClient, randstr: Callable[[str], str], runtime: str) -> Run:
    """Create a batch run that does not require LLM."""
    name = randstr("batch_run_name")
    run = pf.run(
        # copy test_configs/flows/simple_hello_world to a separate folder
        # as pf.run will generate .promptflow/flow.tools.json
        # it will affect Azure file share upload logic and replay test
        flow=f"{FLOWS_DIR}/hello-world",
        data=f"{DATAS_DIR}/webClassification3.jsonl",
        column_mapping={"name": "${data.url}"},
        name=name,
        display_name="sdk-cli-test-fixture-batch-run-without-llm",
    )
    # Block until the run finishes so dependent fixtures/tests see a terminal state.
    run = pf.runs.stream(run=name)
    assert run.status == RunStatus.COMPLETED
    yield run
@pytest.fixture(scope=package_scope_in_live_mode())
def simple_eager_run(pf: PFClient, randstr: Callable[[str], str]) -> Run:
    """Create a simple eager run."""
    run = pf.run(
        flow=f"{EAGER_FLOWS_DIR}/simple_with_req",
        data=f"{DATAS_DIR}/simple_eager_flow_data.jsonl",
        name=randstr("name"),
    )
    pf.runs.stream(run)
    run = pf.runs.get(run)
    assert run.status == RunStatus.COMPLETED
    yield run
@pytest.fixture(scope=package_scope_in_live_mode())
def created_eval_run_without_llm(
    pf: PFClient, randstr: Callable[[str], str], runtime: str, created_batch_run_without_llm: Run
) -> Run:
    """Create a evaluation run against batch run without LLM dependency."""
    name = randstr("eval_run_name")
    run = pf.run(
        flow=f"{FLOWS_DIR}/eval-classification-accuracy",
        data=f"{DATAS_DIR}/webClassification3.jsonl",
        run=created_batch_run_without_llm,
        column_mapping={"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"},
        runtime=runtime,
        name=name,
        display_name="sdk-cli-test-fixture-eval-run-without-llm",
    )
    run = pf.runs.stream(run=name)
    assert run.status == RunStatus.COMPLETED
    yield run
@pytest.fixture(scope=package_scope_in_live_mode())
def created_failed_run(pf: PFClient, randstr: Callable[[str], str], runtime: str) -> Run:
    """Create a failed run."""
    name = randstr("failed_run_name")
    run = pf.run(
        flow=f"{FLOWS_DIR}/partial_fail",
        data=f"{DATAS_DIR}/webClassification3.jsonl",
        runtime=runtime,
        name=name,
        display_name="sdk-cli-test-fixture-failed-run",
    )
    # set raise_on_error to False to promise returning something
    run = pf.runs.stream(run=name, raise_on_error=False)
    assert run.status == RunStatus.FAILED
    yield run
@pytest.fixture(autouse=not is_live())
def mock_vcrpy_for_httpx() -> None:
    # there is a known issue in vcrpy handling httpx response: https://github.com/kevin1024/vcrpy/pull/591
    # the related code change has not been merged, so we need such a fixture for patch
    def _transform_headers(httpx_response):
        # Re-implementation of vcrpy's header transform that drops the
        # content-encoding header for compressed bodies (the recorded body is
        # already decoded, so keeping the header would break replay).
        out = {}
        for key, var in httpx_response.headers.raw:
            decoded_key = key.decode("utf-8")
            decoded_var = var.decode("utf-8")
            if decoded_key.lower() == "content-encoding" and decoded_var in ("gzip", "deflate"):
                continue
            out.setdefault(decoded_key, [])
            out[decoded_key].append(decoded_var)
        return out
    with patch("vcr.stubs.httpx_stubs._transform_headers", new=_transform_headers):
        yield
@pytest.fixture(autouse=not is_live())
def mock_to_thread() -> None:
    # https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
    # to_thread actually uses a separate thread, which will break mocks
    # so we need to mock it to avoid using a separate thread
    # this is only for AsyncRunDownloader.to_thread
    async def to_thread(func, /, *args, **kwargs):
        # Run synchronously on the current thread instead of a worker thread.
        func(*args, **kwargs)
    with patch(
        "promptflow.azure.operations._async_run_downloader.to_thread",
        new=to_thread,
    ):
        yield
@pytest.fixture
def mock_isinstance_for_mock_datastore() -> None:
    """Mock built-in function isinstance.
    We have an isinstance check during run download for datastore type for better error message;
    while our mock datastore in replay mode is not a valid type, so mock it with strict condition.
    """
    if not is_replay():
        yield
    else:
        from azure.ai.ml.entities._datastore.azure_storage import AzureBlobDatastore
        from .recording_utilities.utils import MockDatastore
        original_isinstance = isinstance
        def mock_isinstance(*args):
            # Only the (MockDatastore, AzureBlobDatastore) pair is special-cased;
            # every other check falls through to the real isinstance.
            if original_isinstance(args[0], MockDatastore) and args[1] == AzureBlobDatastore:
                return True
            return original_isinstance(*args)
        with patch("builtins.isinstance", new=mock_isinstance):
            yield
@pytest.fixture(autouse=True)
def mock_check_latest_version() -> None:
    """Mock check latest version.
    As CI uses docker, it will always trigger this check behavior, and we don't have recording for this;
    and this will hit many unknown issue with vcrpy.
    """
    with patch("promptflow._utils.version_hint_utils.check_latest_version", new=lambda: None):
        yield
| promptflow/src/promptflow/tests/sdk_cli_azure_test/conftest.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/conftest.py",
"repo_id": "promptflow",
"token_count": 7579
} | 60 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import base64
import json
from typing import Dict
from vcr.request import Request
from .constants import AzureMLResourceTypes, SanitizedValues
from .utils import (
is_json_payload_request,
is_json_payload_response,
sanitize_azure_workspace_triad,
sanitize_email,
sanitize_experiment_id,
sanitize_pfs_request_body,
sanitize_pfs_response_body,
sanitize_upload_hash,
sanitize_username,
)
class RecordingProcessor:
    """Base class for request/response sanitizers applied to VCR recordings.

    Subclasses override either hook; the defaults are identity passthroughs.
    """
    def process_request(self, request: Request) -> Request:
        # Default: return the request unmodified.
        return request
    def process_response(self, response: Dict) -> Dict:
        # Default: return the response unmodified.
        return response
class AzureWorkspaceTriadProcessor(RecordingProcessor):
    """Sanitize subscription id, resource group name and workspace name."""
    def process_request(self, request: Request) -> Request:
        request.uri = sanitize_azure_workspace_triad(request.uri)
        return request
    def process_response(self, response: Dict) -> Dict:
        # VCR stores response bodies as strings; sanitize the raw string in place.
        response["body"]["string"] = sanitize_azure_workspace_triad(response["body"]["string"])
        return response
class AzureMLExperimentIDProcessor(RecordingProcessor):
    """Sanitize Azure ML experiment id, currently we use workspace id as the value."""
    def process_request(self, request: Request) -> Request:
        request.uri = sanitize_experiment_id(request.uri)
        return request
    def process_response(self, response: Dict) -> Dict:
        if is_json_payload_response(response):
            # Cheap substring check first so json.loads is not run on every response.
            if "experimentId" in response["body"]["string"]:
                body = json.loads(response["body"]["string"])
                if "experimentId" in body:
                    body["experimentId"] = SanitizedValues.WORKSPACE_ID
                response["body"]["string"] = json.dumps(body)
        return response
class AzureResourceProcessor(RecordingProcessor):
    """Sanitize sensitive data in Azure resource GET response.

    Stateful: datastore GET responses populate the real storage account /
    container / file-share names, which later requests are scrubbed against.
    Order therefore matters — the datastore response must be processed before
    the storage requests that use those names.
    """

    def __init__(self):
        # datastore related
        # Real names collected from datastore responses; used by
        # _sanitize_request_url_for_storage for plain string replacement.
        self.storage_account_names = set()
        self.storage_container_names = set()
        self.file_share_names = set()

    def _sanitize_request_url_for_storage(self, uri: str) -> str:
        # this instance will store storage account names and container names
        # so we can apply the sanitization here with simple string replace rather than regex
        for account_name in self.storage_account_names:
            uri = uri.replace(account_name, SanitizedValues.FAKE_ACCOUNT_NAME)
        for container_name in self.storage_container_names:
            uri = uri.replace(container_name, SanitizedValues.FAKE_CONTAINER_NAME)
        for file_share_name in self.file_share_names:
            uri = uri.replace(file_share_name, SanitizedValues.FAKE_FILE_SHARE_NAME)
        return uri

    def process_request(self, request: Request) -> Request:
        # Scrub any previously-seen storage names from the request URL.
        request.uri = self._sanitize_request_url_for_storage(request.uri)
        return request

    def _sanitize_response_body(self, body: Dict) -> Dict:
        # Dispatch on the ARM resource type; unknown types pass through unchanged.
        resource_type = body.get("type")
        if resource_type == AzureMLResourceTypes.WORKSPACE:
            body = self._sanitize_response_for_workspace(body)
        elif resource_type == AzureMLResourceTypes.CONNECTION:
            body = self._sanitize_response_for_arm_connection(body)
        elif resource_type == AzureMLResourceTypes.DATASTORE:
            body = self._sanitize_response_for_datastore(body)
        return body

    def process_response(self, response: Dict) -> Dict:
        if is_json_payload_response(response):
            body = json.loads(response["body"]["string"])
            if isinstance(body, dict):
                # response can be a list sometimes (e.g. get workspace datastores)
                # need to sanitize each with a for loop
                if "value" in body:
                    resources = body["value"]
                    for i in range(len(resources)):
                        resources[i] = self._sanitize_response_body(resources[i])
                    body["value"] = resources
                else:
                    body = self._sanitize_response_body(body)
                response["body"]["string"] = json.dumps(body)
        return response

    def _sanitize_response_for_workspace(self, body: Dict) -> Dict:
        # Drop identity/system metadata entirely; they carry tenant-specific values.
        filter_keys = ["identity", "properties", "systemData"]
        for k in filter_keys:
            if k in body:
                body.pop(k)
        # need during the constructor of FlowServiceCaller (for vNet case)
        body["properties"] = {"discoveryUrl": SanitizedValues.DISCOVERY_URL}
        name = body["name"]
        body["name"] = SanitizedValues.WORKSPACE_NAME
        body["id"] = body["id"].replace(name, SanitizedValues.WORKSPACE_NAME)
        return body

    def _sanitize_response_for_arm_connection(self, body: Dict) -> Dict:
        if body["properties"]["authType"] == "CustomKeys":
            # custom connection, sanitize "properties.credentials.keys"
            body["properties"]["credentials"]["keys"] = {}
        else:
            # others, sanitize "properties.credentials.key"
            body["properties"]["credentials"]["key"] = "_"
        body["properties"]["target"] = "_"
        return body

    def _sanitize_response_for_datastore(self, body: Dict) -> Dict:
        body["properties"]["subscriptionId"] = SanitizedValues.SUBSCRIPTION_ID
        body["properties"]["resourceGroup"] = SanitizedValues.RESOURCE_GROUP_NAME
        # Remember the real account name before replacing it, so later
        # storage requests can be scrubbed (see _sanitize_request_url_for_storage).
        self.storage_account_names.add(body["properties"]["accountName"])
        body["properties"]["accountName"] = SanitizedValues.FAKE_ACCOUNT_NAME
        # blob storage
        if "containerName" in body["properties"]:
            self.storage_container_names.add(body["properties"]["containerName"])
            body["properties"]["containerName"] = SanitizedValues.FAKE_CONTAINER_NAME
        # file share
        elif "fileShareName" in body["properties"]:
            self.file_share_names.add(body["properties"]["fileShareName"])
            body["properties"]["fileShareName"] = SanitizedValues.FAKE_FILE_SHARE_NAME
        return body
class AzureOpenAIConnectionProcessor(RecordingProcessor):
    """Sanitize api_base in AOAI connection GET response."""

    def process_response(self, response: Dict) -> Dict:
        if not is_json_payload_response(response):
            return response
        body = json.loads(response["body"]["string"])
        # Only AOAI connection payloads carry an api_base that needs scrubbing.
        if isinstance(body, dict) and body.get("connectionType") == "AzureOpenAI":
            body["configs"]["api_base"] = SanitizedValues.FAKE_API_BASE
            response["body"]["string"] = json.dumps(body)
        return response
class StorageProcessor(RecordingProcessor):
    """Sanitize sensitive data during storage operations when submit run."""

    def process_request(self, request: Request) -> Request:
        # Scrub upload hashes and usernames from the URL and any JSON body.
        request.uri = sanitize_username(sanitize_upload_hash(request.uri))
        if request.body is not None and is_json_payload_request(request):
            text = request.body.decode("utf-8")
            text = sanitize_username(sanitize_upload_hash(text))
            request.body = text.encode("utf-8")
        return request

    def process_response(self, response: Dict) -> Dict:
        if not is_json_payload_response(response):
            return response
        sanitized = sanitize_username(response["body"]["string"])
        response["body"]["string"] = sanitized
        body = json.loads(sanitized)
        if isinstance(body, dict):
            self._sanitize_list_secrets_response(body)
            response["body"]["string"] = json.dumps(body)
        return response

    def _sanitize_list_secrets_response(self, body: Dict) -> Dict:
        # Replace the storage key with a fake one, keeping the base64 encoding.
        if "key" in body:
            fake_key_b64 = base64.b64encode(SanitizedValues.FAKE_KEY.encode("ascii"))
            body["key"] = fake_key_b64.decode("ascii")
        return body
class DropProcessor(RecordingProcessor):
    """Ignore some requests that won't be used during playback."""

    def process_request(self, request: Request) -> Request:
        # MSI token requests are environment-specific; returning None drops
        # the interaction from the cassette entirely.
        return None if "/metadata/identity/oauth2/token" in request.path else request
class PFSProcessor(RecordingProcessor):
    """Sanitize request/response for PFS operations."""

    def process_request(self, request: Request) -> Request:
        if request.body is not None and is_json_payload_request(request):
            sanitized = sanitize_pfs_request_body(request.body.decode("utf-8"))
            request.body = sanitized.encode("utf-8")
        return request

    def process_response(self, response: Dict) -> Dict:
        if is_json_payload_response(response):
            raw_body = response["body"]["string"]
            response["body"]["string"] = sanitize_pfs_response_body(raw_body)
        return response
class UserInfoProcessor(RecordingProcessor):
    """Sanitize user object id and tenant id in requests and responses."""

    def __init__(self, user_object_id: str, tenant_id: str):
        self.user_object_id = user_object_id
        self.tenant_id = tenant_id

    def _scrub(self, text: str) -> str:
        # Replace the real identifiers with stable placeholder values.
        text = text.replace(self.user_object_id, SanitizedValues.USER_OBJECT_ID)
        return text.replace(self.tenant_id, SanitizedValues.TENANT_ID)

    def process_request(self, request: Request) -> Request:
        if is_json_payload_request(request) and request.body is not None:
            sanitized = self._scrub(str(request.body.decode("utf-8")))
            request.body = sanitized.encode("utf-8")
        return request

    def process_response(self, response: Dict) -> Dict:
        if is_json_payload_response(response):
            response["body"]["string"] = self._scrub(str(response["body"]["string"]))
        return response
class IndexServiceProcessor(RecordingProcessor):
    """Sanitize index service responses."""

    def process_response(self, response: Dict) -> Dict:
        """Drop the pagination continuation token from JSON payloads.

        The token is environment/time specific, so keeping it would make
        playback diverge from the recording.
        """
        if is_json_payload_response(response):
            if "continuationToken" in response["body"]["string"]:
                body = json.loads(response["body"]["string"])
                # The payload may be a JSON list (or other non-dict value);
                # only a dict can carry a top-level continuation token, and
                # .pop on a list would raise TypeError.
                if isinstance(body, dict):
                    body.pop("continuationToken", None)
                    response["body"]["string"] = json.dumps(body)
        return response
class EmailProcessor(RecordingProcessor):
    """Sanitize email address in responses."""

    def process_response(self, response: Dict) -> Dict:
        # Scrub any email addresses from the recorded response body.
        response["body"]["string"] = sanitize_email(response["body"]["string"])
        return response
| promptflow/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/processors.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/processors.py",
"repo_id": "promptflow",
"token_count": 4397
} | 61 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from promptflow import PFClient
from promptflow._sdk._configuration import Configuration
# Building blocks for an Azure ML workspace ARM resource id
# (filled via RESOURCE_ID_FORMAT.format(sub, rg, provider, ws)).
AZUREML_RESOURCE_PROVIDER = "Microsoft.MachineLearningServices"
RESOURCE_ID_FORMAT = "/subscriptions/{}/resourceGroups/{}/providers/{}/workspaces/{}"
@pytest.fixture
def pf() -> PFClient:
    # Default local prompt flow client used by the tests in this package.
    return PFClient()
@pytest.fixture
def global_config(subscription_id: str, resource_group_name: str, workspace_name: str) -> None:
    """Point the global pf config at an azureml workspace connection provider.

    No-op when a connection provider is already configured, so an existing
    user setting is never overwritten.
    """
    config = Configuration.get_instance()
    if Configuration.CONNECTION_PROVIDER in config._config:
        return
    resource_id = RESOURCE_ID_FORMAT.format(
        subscription_id, resource_group_name, AZUREML_RESOURCE_PROVIDER, workspace_name
    )
    config.set_config(Configuration.CONNECTION_PROVIDER, "azureml:" + resource_id)
| promptflow/src/promptflow/tests/sdk_cli_global_config_test/conftest.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_global_config_test/conftest.py",
"repo_id": "promptflow",
"token_count": 293
} | 62 |
import json
import os
import re
import pytest
from promptflow._core.operation_context import OperationContext
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_swagger(flow_serving_client):
    """The serving app should expose an OpenAPI 3 spec describing the flow's /score contract."""
    swagger_dict = json.loads(flow_serving_client.get("/swagger.json").data.decode())
    # Exact-match against the full expected spec so any schema drift fails loudly.
    assert swagger_dict == {
        "components": {"securitySchemes": {"bearerAuth": {"scheme": "bearer", "type": "http"}}},
        "info": {
            "title": "Promptflow[basic-with-connection] API",
            "version": "1.0.0",
            "x-flow-name": "basic-with-connection",
        },
        "openapi": "3.0.0",
        "paths": {
            "/score": {
                "post": {
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "example": {"text": "Hello World!"},
                                "schema": {
                                    "properties": {"text": {"type": "string"}},
                                    "required": ["text"],
                                    "type": "object",
                                },
                            }
                        },
                        "description": "promptflow input data",
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "content": {
                                "application/json": {
                                    "schema": {"properties": {"output_prompt": {"type": "string"}}, "type": "object"}
                                }
                            },
                            "description": "successful operation",
                        },
                        "400": {"description": "Invalid input"},
                        "default": {"description": "unexpected error"},
                    },
                    "summary": "run promptflow: basic-with-connection with an given input",
                }
            }
        },
        "security": [{"bearerAuth": []}],
    }
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_chat_swagger(serving_client_llm_chat):
    """Chat flows should expose chat-specific x-* extensions in their OpenAPI spec."""
    swagger_dict = json.loads(serving_client_llm_chat.get("/swagger.json").data.decode())
    # Exact-match: note the x-chat-history / x-chat-input / x-chat-output markers.
    assert swagger_dict == {
        "components": {"securitySchemes": {"bearerAuth": {"scheme": "bearer", "type": "http"}}},
        "info": {
            "title": "Promptflow[chat_flow_with_stream_output] API",
            "version": "1.0.0",
            "x-flow-name": "chat_flow_with_stream_output",
            "x-chat-history": "chat_history",
            "x-chat-input": "question",
            "x-flow-type": "chat",
            "x-chat-output": "answer",
        },
        "openapi": "3.0.0",
        "paths": {
            "/score": {
                "post": {
                    "requestBody": {
                        "content": {
                            "application/json": {
                                "example": {},
                                "schema": {
                                    "properties": {
                                        "chat_history": {
                                            "type": "array",
                                            "items": {"type": "object", "additionalProperties": {}},
                                        },
                                        "question": {"type": "string", "default": "What is ChatGPT?"},
                                    },
                                    "required": ["chat_history", "question"],
                                    "type": "object",
                                },
                            }
                        },
                        "description": "promptflow input data",
                        "required": True,
                    },
                    "responses": {
                        "200": {
                            "content": {
                                "application/json": {
                                    "schema": {"properties": {"answer": {"type": "string"}}, "type": "object"}
                                }
                            },
                            "description": "successful operation",
                        },
                        "400": {"description": "Invalid input"},
                        "default": {"description": "unexpected error"},
                    },
                    "summary": "run promptflow: chat_flow_with_stream_output with an given input",
                }
            }
        },
        "security": [{"bearerAuth": []}],
    }
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_user_agent(flow_serving_client):
    """The serving app should append its own user agent alongside the caller's."""
    user_agent = OperationContext.get_instance().get_user_agent()
    assert "test-user-agent" in user_agent
    assert "promptflow-local-serving" in user_agent
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_serving_api(flow_serving_client):
    """Smoke-test the serving app: health probe, home page and /score."""
    health = flow_serving_client.get("/health")
    assert b'{"status":"Healthy","version":"0.0.1"}' in health.data
    home = flow_serving_client.get("/")
    print(home.data)
    assert home.status_code == 200
    score = flow_serving_client.post("/score", data=json.dumps({"text": "hi"}))
    assert (
        score.status_code == 200
    ), f"Response code indicates error {score.status_code} - {score.data.decode()}"
    assert "output_prompt" in json.loads(score.data.decode())
    # Assert environment variable resolved
    assert os.environ["API_TYPE"] == "azure"
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_evaluation_flow_serving_api(evaluation_flow_serving_client):
    """Scoring a URL with the evaluation flow should return a category."""
    payload = json.dumps({"url": "https://www.microsoft.com/"})
    response = evaluation_flow_serving_client.post("/score", data=payload)
    assert (
        response.status_code == 200
    ), f"Response code indicates error {response.status_code} - {response.data.decode()}"
    assert "category" in json.loads(response.data.decode())
@pytest.mark.e2etest
def test_unknown_api(flow_serving_client):
    """Unknown routes — and wrong methods on known routes — must 404 with a helpful message."""
    # /health only supports GET, so a POST to it is treated as unknown too.
    for method, route in (("get", "/unknown"), ("post", "/health")):
        response = getattr(flow_serving_client, method)(route)
        assert b"not supported by current app" in response.data
        assert response.status_code == 404
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
@pytest.mark.parametrize(
    "accept, expected_status_code, expected_content_type",
    [
        ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
        ("text/html", 406, "application/json"),
        ("application/json", 200, "application/json"),
        ("*/*", 200, "application/json"),
        ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
        ("application/json, */*", 200, "application/json"),
        ("", 200, "application/json"),
    ],
)
def test_stream_llm_chat(
    serving_client_llm_chat,
    accept,
    expected_status_code,
    expected_content_type,
):
    """Content negotiation for a streaming chat flow.

    An explicit text/event-stream Accept header yields an SSE stream; JSON or
    wildcard Accepts yield a JSON body; an unsupported media type is rejected
    with 406 and a UserError payload.
    """
    payload = {
        "question": "What is the capital of France?",
        "chat_history": [],
    }
    headers = {
        "Content-Type": "application/json",
        "Accept": accept,
    }
    response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
    assert response.status_code == expected_status_code
    assert response.content_type == expected_content_type
    if response.status_code == 406:
        assert response.json["error"]["code"] == "UserError"
        assert (
            f"Media type {accept} in Accept header is not acceptable. Supported media type(s) -"
            in response.json["error"]["message"]
        )
    # Dump the body for debugging; content assertions for streams live in
    # test_stream_python_stream_tools.
    if "text/event-stream" in response.content_type:
        for line in response.data.decode().split("\n"):
            print(line)
    else:
        result = response.json
        print(result)
@pytest.mark.e2etest
@pytest.mark.parametrize(
    "accept, expected_status_code, expected_content_type",
    [
        ("text/event-stream", 200, "text/event-stream; charset=utf-8"),
        ("text/html", 406, "application/json"),
        ("application/json", 200, "application/json"),
        ("*/*", 200, "application/json"),
        ("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
        ("application/json, */*", 200, "application/json"),
        ("", 200, "application/json"),
    ],
)
def test_stream_python_stream_tools(
    serving_client_python_stream_tools,
    accept,
    expected_status_code,
    expected_content_type,
):
    """Verify both SSE and JSON renderings of a streaming python tool's output."""
    payload = {
        "text": "Hello World!",
    }
    headers = {
        "Content-Type": "application/json",
        "Accept": accept,
    }
    response = serving_client_python_stream_tools.post("/score", json=payload, headers=headers)
    assert response.status_code == expected_status_code
    assert response.content_type == expected_content_type
    # The predefined flow in this test case is echo flow, which will return the input text.
    # Check output as test logic validation.
    # Stream generator generating logic
    # - The output is split into words, and each word is sent as a separate event
    # - Event data is a dict { $flowoutput_field_name : $word}
    # - The event data is formatted as f"data: {json.dumps(data)}\n\n"
    # - Generator will yield the event data for each word
    if response.status_code == 200:
        expected_output = f"Echo: {payload.get('text')}"
        if "text/event-stream" in response.content_type:
            words = expected_output.split()
            lines = response.data.decode().split("\n\n")
            # The last line is empty
            lines = lines[:-1]
            assert all(f"data: {json.dumps({'output_echo' : f'{w} '})}" == l for w, l in zip(words, lines))
        else:
            # For json response, iterator is joined into a string with "" as delimiter
            words = expected_output.split()
            merged_text = "".join(word + " " for word in words)
            expected_json = {"output_echo": merged_text}
            result = response.json
            assert expected_json == result
    elif response.status_code == 406:
        # Unsupported Accept header -> structured UserError payload.
        assert response.json["error"]["code"] == "UserError"
        assert (
            f"Media type {accept} in Accept header is not acceptable. Supported media type(s) -"
            in response.json["error"]["message"]
        )
@pytest.mark.usefixtures("recording_injection")
@pytest.mark.e2etest
@pytest.mark.parametrize(
    "accept, expected_status_code, expected_content_type",
    [
        ("text/event-stream", 406, "application/json"),
        ("application/json", 200, "application/json"),
        ("*/*", 200, "application/json"),
        ("text/event-stream, application/json", 200, "application/json"),
        ("application/json, */*", 200, "application/json"),
        ("", 200, "application/json"),
    ],
)
def test_stream_python_nonstream_tools(
    flow_serving_client,
    accept,
    expected_status_code,
    expected_content_type,
):
    """A non-streaming flow must never negotiate text/event-stream."""
    headers = {
        "Content-Type": "application/json",
        "Accept": accept,
    }
    response = flow_serving_client.post("/score", json={"text": "Hello World!"}, headers=headers)
    # Dump the body for debugging before asserting.
    if "text/event-stream" in response.content_type:
        for chunk in response.data.decode().split("\n"):
            print(chunk)
    else:
        print(response.json)
    assert response.status_code == expected_status_code
    assert response.content_type == expected_content_type
@pytest.mark.usefixtures("serving_client_image_python_flow", "recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_image_flow(serving_client_image_python_flow, sample_image):
    """Scoring an image input should return an output keyed by a base64 mime marker."""
    raw = serving_client_image_python_flow.post("/score", data=json.dumps({"image": sample_image}))
    assert (
        raw.status_code == 200
    ), f"Response code indicates error {raw.status_code} - {raw.data.decode()}"
    payload = json.loads(raw.data.decode())
    assert set(payload) == {"output"}
    # Output keys look like "data:image/<format>;base64".
    key_pattern = re.compile(r"data:image/(.*);base64")
    assert re.match(key_pattern, next(iter(payload["output"])))
@pytest.mark.usefixtures("serving_client_composite_image_flow", "recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_list_image_flow(serving_client_composite_image_flow, sample_image):
    """Composite (list and dict) image inputs should round-trip through scoring."""
    image_dict = {"data:image/jpg;base64": sample_image}
    body = json.dumps({"image_list": [image_dict], "image_dict": {"my_image": image_dict}})
    raw = serving_client_composite_image_flow.post("/score", data=body)
    assert (
        raw.status_code == 200
    ), f"Response code indicates error {raw.status_code} - {raw.data.decode()}"
    payload = json.loads(raw.data.decode())
    assert set(payload) == {"output"}
    assert (
        "data:image/jpg;base64" in payload["output"][0]
    ), f"data:image/jpg;base64 not in output list {payload['output']}"
@pytest.mark.usefixtures("serving_client_with_environment_variables")
@pytest.mark.e2etest
def test_flow_with_environment_variables(serving_client_with_environment_variables):
    """Environment variables declared by the flow should be resolved and queryable."""
    expected_values = {
        "env1": "2",
        "env2": "runtime_env2",
        "env3": "[1, 2, 3, 4, 5]",
        "env4": '{"a": 1, "b": "2"}',
        "env10": "aaaaa",
    }
    for name, value in expected_values.items():
        raw = serving_client_with_environment_variables.post("/score", data=json.dumps({"key": name}))
        assert (
            raw.status_code == 200
        ), f"Response code indicates error {raw.status_code} - {raw.data.decode()}"
        payload = json.loads(raw.data.decode())
        assert set(payload) == {"output"}
        assert payload["output"] == value
| promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_serve.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_serve.py",
"repo_id": "promptflow",
"token_count": 6615
} | 63 |
from pathlib import Path
import pytest
from sdk_cli_test.conftest import MODEL_ROOT
from promptflow._cli._pf._flow import _resolve_python_flow_additional_includes
@pytest.mark.unittest
def test_flow_serve_resolve_additional_includes():
    """Resolving additional includes copies referenced files next to the flow."""
    # Assert flow path not changed if no additional includes
    plain_flow = (Path(MODEL_ROOT) / "web_classification").resolve().absolute().as_posix()
    assert _resolve_python_flow_additional_includes(plain_flow) == plain_flow
    # Assert additional includes are resolved correctly
    include_flow = (
        (Path(MODEL_ROOT) / "web_classification_with_additional_include").resolve().absolute().as_posix()
    )
    resolved = Path(_resolve_python_flow_additional_includes(include_flow))
    for file_name in ("convert_to_dict.py", "fetch_text_content_from_url.py", "summarize_text_content.jinja2"):
        assert (resolved / file_name).exists()
| promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_flow_serve.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_flow_serve.py",
"repo_id": "promptflow",
"token_count": 346
} | 64 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from ..utils import PFSOperations, check_activity_end_telemetry
@pytest.mark.usefixtures("use_secrets_config_file")
@pytest.mark.e2etest
class TestTelemetryAPIs:
    """End-to-end tests for the local PFS telemetry endpoint."""

    def test_post_telemetry(self, pfs_op: PFSOperations) -> None:
        """Posting Start then End events should emit an activity-end telemetry record
        that carries the caller's request id and user agent."""
        from promptflow._sdk._telemetry.activity import generate_request_id

        request_id = generate_request_id()
        user_agent = "prompt-flow-extension/1.8.0 (win32; x64) VS/0.0.1"
        # Start event: no telemetry assertion; it only opens the activity.
        _ = pfs_op.create_telemetry(
            body={
                "eventType": "Start",
                "timestamp": "2021-01-01T00:00:00Z",
                "metadata": {
                    "activityName": "pf.flow.test",
                    "activityType": "InternalCall",
                },
            },
            status_code=200,
            headers={
                "x-ms-promptflow-request-id": request_id,
                "User-Agent": user_agent,
            },
        ).json
        # End event: the emitted telemetry must echo the request id and append
        # the local PFS user agent to the caller-provided one.
        with check_activity_end_telemetry(
            activity_name="pf.flow.test",
            activity_type="InternalCall",
            user_agent=f"{user_agent} local_pfs/0.0.1",
            request_id=request_id,
        ):
            response = pfs_op.create_telemetry(
                body={
                    "eventType": "End",
                    "timestamp": "2021-01-01T00:00:00Z",
                    "metadata": {
                        "activityName": "pf.flow.test",
                        "activityType": "InternalCall",
                        "completionStatus": "Success",
                        "durationMs": 1000,
                    },
                },
                headers={
                    "x-ms-promptflow-request-id": request_id,
                    "User-Agent": user_agent,
                },
                status_code=200,
            ).json
        assert len(response) >= 1
| promptflow/src/promptflow/tests/sdk_pfs_test/e2etests/test_telemetry_apis.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_pfs_test/e2etests/test_telemetry_apis.py",
"repo_id": "promptflow",
"token_count": 1088
} | 65 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/SerpConnection.schema.json
name: my_serp_connection
type: serp
api_key: "<to-be-replaced>"
| promptflow/src/promptflow/tests/test_configs/connections/serp_connection.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/connections/serp_connection.yaml",
"repo_id": "promptflow",
"token_count": 61
} | 66 |
{"name": "Red", "id_text": "1.0", "id_int": 1, "id_float": 1.0 }
{"name": "Blue", "id_text": "3.0", "id_int": 3, "id_float": 3.0 }
{"name": "Yellow", "id_text": "2.0", "id_int": 2, "id_float": 2.0 } | promptflow/src/promptflow/tests/test_configs/datas/load_data_cases/colors.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/datas/load_data_cases/colors.jsonl",
"repo_id": "promptflow",
"token_count": 98
} | 67 |
import asyncio
from promptflow import trace
@trace
async def wait(n: int):
    """Asynchronously sleep for *n* seconds (traced)."""
    await asyncio.sleep(n)
@trace
async def dummy_llm(prompt: str, model: str, wait_seconds: int):
    """Fake an LLM call: sleep for *wait_seconds*, then echo the prompt back."""
    # `model` is intentionally unused here — presumably kept so traces show
    # which model each call targeted; verify against the trace fixture.
    await wait(wait_seconds)
    return prompt
async def my_flow(text: str, models: list = []):
    """Kick off one dummy LLM call per model (staggered waits) and await them all."""
    pending = [
        asyncio.create_task(dummy_llm(text, model, index + 1))
        for index, model in enumerate(models)
    ]
    await asyncio.wait(pending)
    return "dummy_output"
| promptflow/src/promptflow/tests/test_configs/eager_flows/dummy_flow_with_trace/entry.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/dummy_flow_with_trace/entry.py",
"repo_id": "promptflow",
"token_count": 180
} | 68 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# Two identical entry candidates — presumably a fixture for the
# "multiple entries in one module" scenario; verify against the tests using it.
def my_flow1():
    """Simple flow without yaml."""
    print("Hello world!")
def my_flow2():
    """Simple flow without yaml."""
    print("Hello world!")
| promptflow/src/promptflow/tests/test_configs/eager_flows/multiple_entries/entry1.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/multiple_entries/entry1.py",
"repo_id": "promptflow",
"token_count": 83
} | 69 |
from promptflow import tool
@tool
def summary_result(input1: str = "Node A not executed.", input2: str = "Node B not executed.") -> str:
    """Join the two upstream node results into a single space-separated summary."""
    return " ".join((input1, input2))
| promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/summary_result.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/summary_result.py",
"repo_id": "promptflow",
"token_count": 50
} | 70 |
{
"text": "hi"
} | promptflow/src/promptflow/tests/test_configs/flows/all_depedencies_bypassed_with_activate_met/inputs.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/all_depedencies_bypassed_with_activate_met/inputs.json",
"repo_id": "promptflow",
"token_count": 13
} | 71 |
inputs:
text:
type: string
default: dummy_input
outputs:
output_prompt:
type: string
reference: ${async_fail.output}
nodes:
- name: async_fail
type: python
source:
type: code
path: async_fail.py
inputs:
s: ${inputs.text}
| promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 110
} | 72 |
environment:
python_requirements_txt: requirements.txt
version: 2
inputs:
chat_history:
type: list
is_chat_history: true
default: []
question:
type: string
is_chat_input: true
default: I am going to swim today for 30 min in Guangzhou city, how much
calories will I burn?
assistant_id:
type: string
default: ""
thread_id:
type: string
default: ""
outputs:
answer:
type: string
reference: ${assistant.output}
is_chat_output: true
thread_id:
type: string
reference: ${get_or_create_thread.output}
nodes:
- name: get_or_create_thread
type: python
source:
type: code
path: get_or_create_thread.py
inputs:
conn: chw-manager-OpenAI
thread_id: ${inputs.thread_id}
- name: assistant
type: python
source:
type: code
path: add_message_and_run.py
inputs:
conn: chw-manager-OpenAI
message: ${inputs.question}
assistant_id: ${inputs.assistant_id}
thread_id: ${get_or_create_thread.output}
download_images: true
assistant_definition: assistant_definition.yaml
| promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 420
} | 73 |
inputs:
chat_history:
type: list
question:
type: string
is_chat_input: true
default: What is ChatGPT?
outputs:
answer:
type: string
reference: ${show_answer.output}
is_chat_output: true
nodes:
- inputs:
deployment_name: gpt-35-turbo
max_tokens: "256"
temperature: "0.7"
chat_history: ${inputs.chat_history}
question: ${inputs.question}
name: chat
type: llm
source:
type: code
path: chat.jinja2
api: chat
provider: AzureOpenAI
connection: azure_open_ai_connection
- name: show_answer
type: python
source:
type: code
path: show_answer.py
inputs:
chat_answer: ${chat.output} | promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_exception/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_exception/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 272
} | 74 |
from promptflow import tool
@tool
def extract_incident_id(incident_content: str, incident_id: int):
if incident_id >= 0 and incident_id < 3:
return {
"has_incident_id": True,
"incident_id": incident_id,
"incident_content": incident_content
}
return {
"has_incident_id": False,
"incident_id": incident_id,
"incident_content": incident_content
} | promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/incident_id_extractor.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/incident_id_extractor.py",
"repo_id": "promptflow",
"token_count": 196
} | 75 |
from promptflow import tool
@tool
def square(input: int) -> int:
    """Return the square of the input."""
    return input ** 2
| promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/square.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/square.py",
"repo_id": "promptflow",
"token_count": 27
} | 76 |
inputs:
image_list:
type: list
default:
- data:image/jpg;path: logo.jpg
- data:image/png;path: logo_2.png
image_dict:
type: object
default:
image_1:
data:image/jpg;path: logo.jpg
image_2:
data:image/png;path: logo_2.png
outputs:
output:
type: list
reference: ${python_node.output}
nodes:
- name: python_node
type: python
source:
type: code
path: passthrough_list.py
inputs:
image_list: ${inputs.image_list}
image_dict: ${inputs.image_dict}
- name: aggregate_1
type: python
source:
type: code
path: merge_images.py
inputs:
image_list: ${python_node.output}
image_dict:
- image_1:
data:image/jpg;path: logo.jpg
image_2:
data:image/png;path: logo_2.png
aggregation: true
- name: aggregate_2
type: python
source:
type: code
path: merge_images.py
inputs:
image_list: ${python_node.output}
image_dict: ${inputs.image_dict}
aggregation: true
| promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_composite_image/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_composite_image/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 439
} | 77 |
*.ipynb
.venv/
.data/
.env
.vscode/
outputs/
connection.json
.gitignore
README.md
eval_cli.md
data/
| promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/.amlignore/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/.amlignore",
"repo_id": "promptflow",
"token_count": 49
} | 78 |
import os
import openai
from dotenv import load_dotenv
from promptflow import tool
# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need
def to_bool(value) -> bool:
    """Interpret any value as a boolean: only the string "true" (case-insensitive) is True."""
    return str(value).casefold() == "true"
@tool
def my_python_tool(input1: str) -> str:
    # NOTE(review): input1 appears deliberately ignored and the trailing space kept —
    # this lives under "failed_flow" fixtures, so the exact output 'hello ' may be
    # pinned by tests; confirm before "fixing".
    return 'hello '
| promptflow/src/promptflow/tests/test_configs/flows/failed_flow/hello.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/failed_flow/hello.py",
"repo_id": "promptflow",
"token_count": 127
} | 79 |
tensorflow | promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment/requirements/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment/requirements",
"repo_id": "promptflow",
"token_count": 3
} | 80 |
import os
from langchain.chat_models import AzureChatOpenAI
from langchain_core.messages import HumanMessage
from langchain.agents.agent_types import AgentType
from langchain.agents.initialize import initialize_agent
from langchain.agents.load_tools import load_tools
from promptflow import tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.integrations.langchain import PromptFlowCallbackHandler
@tool
def test_langchain_traces(question: str, conn: AzureOpenAIConnection):
os.environ["AZURE_OPENAI_API_KEY"] = conn.api_key
os.environ["OPENAI_API_VERSION"] = conn.api_version
os.environ["AZURE_OPENAI_ENDPOINT"] = conn.api_base
model = AzureChatOpenAI(
temperature=0.7,
azure_deployment="gpt-35-turbo",
)
tools = load_tools(["llm-math"], llm=model)
# Please keep use agent to enable customized CallBack handler
agent = initialize_agent(
tools, model, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False,
callbacks=[PromptFlowCallbackHandler()]
)
message = HumanMessage(
content=question
)
try:
return agent.run(message)
except Exception as e:
return str(e)
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/test_langchain_traces.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/test_langchain_traces.py",
"repo_id": "promptflow",
"token_count": 434
} | 81 |
{"text": "Hello World!"}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_script_tool_with_custom_strong_type_connection/data.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_script_tool_with_custom_strong_type_connection/data.jsonl",
"repo_id": "promptflow",
"token_count": 9
} | 82 |
from promptflow import tool
from char_generator import character_generator
@tool
def echo(text):
    """Return *text* reassembled through character_generator, prefixed with "Echo - "."""
    generated = "".join(character_generator(text))
    return "Echo - " + generated
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/generator_tools/echo.py",
"repo_id": "promptflow",
"token_count": 71
} | 83 |
from promptflow import tool
from promptflow.connections import AzureOpenAIConnection
@tool
def conn_tool(conn: AzureOpenAIConnection):
    """Assert *conn* is an AzureOpenAIConnection and return its API base URL."""
    assert isinstance(conn, AzureOpenAIConnection)
    api_base = conn.api_base
    return api_base
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/llm_connection_override/conn_tool.py",
"repo_id": "promptflow",
"token_count": 68
} | 84 |
inputs:
number:
type: int
outputs:
output:
type: int
reference: ${mod_three.output.value}
nodes:
- name: mod_three
type: python
source:
type: code
path: mod_three.py
inputs:
number: ${inputs.number}
| promptflow/src/promptflow/tests/test_configs/flows/mod-n/three/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/mod-n/three/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 99
} | 85 |
{
"question": "What is the capital of the United States of America?",
"chat_history": []
}
| promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/samples.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/samples.json",
"repo_id": "promptflow",
"token_count": 33
} | 86 |
import os
from promptflow import tool
from promptflow.connections import CustomConnection
@tool
def print_secret(text: str, connection: CustomConnection):
    """Print the "key1" and "key2" secrets from *connection*, then return *text* unchanged."""
    for key in ("key1", "key2"):
        print(connection[key])
    return text
| promptflow/src/promptflow/tests/test_configs/flows/print_secret_flow/print_secret.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/print_secret_flow/print_secret.py",
"repo_id": "promptflow",
"token_count": 69
} | 87 |
inputs:
image_list:
type: list
default:
- data:image/jpg;path: logo.jpg
- data:image/png;path: logo_2.png
image_dict:
type: object
default:
image_1:
data:image/jpg;path: logo.jpg
image_2:
data:image/png;path: logo_2.png
outputs:
output:
type: list
reference: ${python_node_3.output}
nodes:
- name: python_node
type: python
source:
type: code
path: passthrough_list.py
inputs:
image_list: ${inputs.image_list}
image_dict: ${inputs.image_dict}
- name: python_node_2
type: python
source:
type: code
path: passthrough_dict.py
inputs:
image_list:
- data:image/jpg;path: logo.jpg
- data:image/png;path: logo_2.png
image_dict:
image_1:
data:image/jpg;path: logo.jpg
image_2:
data:image/png;path: logo_2.png
- name: python_node_3
type: python
source:
type: code
path: passthrough_list.py
inputs:
image_list: ${python_node.output}
image_dict: ${python_node_2.output}
| promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 468
} | 88 |
system:
Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous.
{% for item in chat_history %}
user:
{{item.inputs.question}}
{% if 'function_call' in item.outputs.llm_output %}
assistant:
Function generation requested, function = {{item.outputs.llm_output.function_call.name}}, args = {{item.outputs.llm_output.function_call.arguments}}
function:
name:
{{item.outputs.llm_output.function_call.name}}
content:
{{item.outputs.answer}}
{% else %}
assistant:
{{item.outputs.llm_output}}
{% endif %}
{% endfor %}
user:
{{question}} | promptflow/src/promptflow/tests/test_configs/flows/sample_flow_with_functions/use_functions_with_chat_models.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/sample_flow_with_functions/use_functions_with_chat_models.jinja2",
"repo_id": "promptflow",
"token_count": 204
} | 89 |
from promptflow import tool
@tool
def print_special_character(input1: str) -> str:
    """Return a URL containing special characters to exercise file reading.

    NOTE(review): *input1* is unused by design — this is a test fixture.
    """
    url_with_special_chars = "https://www.bing.com//"
    return url_with_special_chars
| promptflow/src/promptflow/tests/test_configs/flows/script_with_special_character/script_with_special_character.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/script_with_special_character/script_with_special_character.py",
"repo_id": "promptflow",
"token_count": 61
} | 90 |
[
{
"input": "atom",
"index": 0
},
{
"input": "atom",
"index": 6
},
{
"input": "atom",
"index": 12
},{
"input": "atom",
"index": 18
},{
"input": "atom",
"index": 24
},{
"input": "atom",
"index": 30
},{
"input": "atom",
"index": 36
},{
"input": "atom",
"index": 42
},{
"input": "atom",
"index": 48
},{
"input": "atom",
"index": 54
}
] | promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_ten_inputs/samples.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_ten_inputs/samples.json",
"repo_id": "promptflow",
"token_count": 239
} | 91 |
{"url": "https://www.youtube.com/watch?v=kYqRtjDBci8", "answer": "Channel", "evidence": "Both"}
{"url": "https://arxiv.org/abs/2307.04767", "answer": "Academic", "evidence": "Both"}
{"url": "https://play.google.com/store/apps/details?id=com.twitter.android", "answer": "App", "evidence": "Both"}
| promptflow/src/promptflow/tests/test_configs/flows/web_classification/data.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification/data.jsonl",
"repo_id": "promptflow",
"token_count": 112
} | 92 |
#!/bin/bash
# Install your packages here.
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/flow.env_files/setup.sh/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/flow.env_files/setup.sh",
"repo_id": "promptflow",
"token_count": 13
} | 93 |
Your task is to classify a given url into one of the following types:
Movie, App, Academic, Channel, Profile, PDF or None based on the text content information.
The classification will be based on the url, the webpage text content summary, or both.
Here are a few examples:
{% for ex in examples %}
URL: {{ex.url}}
Text content: {{ex.text_content}}
OUTPUT:
{"category": "{{ex.category}}", "evidence": "{{ex.evidence}}"}
{% endfor %}
For a given URL : {{url}}, and text content: {{text_content}}.
Classify above url to complete the category and indicate evidence.
OUTPUT:
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/classify_with_llm.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/classify_with_llm.jinja2",
"repo_id": "promptflow",
"token_count": 165
} | 94 |
Please summarize some keywords of this paragraph and have some details of each keywords.
Do not add any information that is not in the text.
Text: {{text}}
Summary:
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_v1/summarize_text_content__variant_1.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_v1/summarize_text_content__variant_1.jinja2",
"repo_id": "promptflow",
"token_count": 39
} | 95 |
from promptflow import tool
@tool
def convert_to_dict(input_str: str):
    """Always raise (fixture simulating a failing node); *input_str* is ignored."""
    failure_message = "mock exception"
    raise Exception(failure_message)
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/convert_to_dict.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/convert_to_dict.py",
"repo_id": "promptflow",
"token_count": 36
} | 96 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.028'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.220'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.152'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.251'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:45:32 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '127'
content-md5:
- i/8q1x5YKzHv3Fd/R8lYUQ==
content-type:
- application/octet-stream
last-modified:
- Fri, 28 Jul 2023 12:34:52 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Fri, 28 Jul 2023 12:34:52 GMT
x-ms-meta-name:
- 13fa99dd-c98e-4f2a-a704-4295d4ed6f68
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 0367c5c6-9f53-4a75-8623-7e53699f0d0b
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:45:33 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification1.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.072'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.149'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:45:37 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '853'
content-md5:
- ylTeNqjvuOvtzEZJ/X5n3A==
content-type:
- application/octet-stream
last-modified:
- Fri, 12 Jan 2024 08:13:57 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Fri, 12 Jan 2024 08:13:56 GMT
x-ms-meta-name:
- 950201e8-c52c-4b15-ada1-5e58de9b2f4d
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:45:38 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml",
"runId": "name", "runDisplayName": "name", "runExperimentName": "", "nodeVariant":
"${summarize_text_content.variant_0}", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl"},
"inputsMapping": {"url": "${data.url}"}, "connections": {}, "environmentVariables":
{}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '873'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '7.296'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "fetch_text_content_from_url", "type":
"python", "source": {"type": "code", "path": "fetch_text_content_from_url.py"},
"inputs": {"fetch_url": "${inputs.url}"}, "tool": "fetch_text_content_from_url.py",
"reduce": false}, {"name": "prepare_examples", "type": "python", "source":
{"type": "code", "path": "prepare_examples.py"}, "inputs": {}, "tool": "prepare_examples.py",
"reduce": false}, {"name": "classify_with_llm", "type": "llm", "source": {"type":
"code", "path": "classify_with_llm.jinja2"}, "inputs": {"deployment_name":
"gpt-35-turbo", "suffix": "", "max_tokens": "128", "temperature": "0.1", "top_p":
"1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": "0",
"frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}",
"examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"},
"tool": "classify_with_llm.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"},
{"name": "convert_to_dict", "type": "python", "source": {"type": "code", "path":
"convert_to_dict.py"}, "inputs": {"input_str": "${classify_with_llm.output}"},
"tool": "convert_to_dict.py", "reduce": false}, {"name": "summarize_text_content",
"type": "llm", "source": {"type": "code", "path": "summarize_text_content.jinja2"},
"inputs": {"deployment_name": "gpt-35-turbo", "suffix": "", "max_tokens":
"128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False",
"stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of":
"1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "tool":
"summarize_text_content.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"}],
"tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs":
{"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "hate_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "self_harm_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "sexual_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text": {"type":
["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"description": "Use Azure Content Safety to detect harmful content.", "module":
"promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin":
true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "classify_with_llm.jinja2", "type":
"prompt", "inputs": {"examples": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text_content":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "classify_with_llm.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"convert_to_dict.py", "type": "python", "inputs": {"input_str": {"type": ["string"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"source": "convert_to_dict.py", "function": "convert_to_dict", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "fetch_text_content_from_url.py",
"type": "python", "inputs": {"fetch_url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "fetch_text_content_from_url.py",
"function": "fetch_text_content_from_url", "is_builtin": false, "enable_kwargs":
false, "tool_state": "stable"}, {"name": "prepare_examples.py", "type": "python",
"source": "prepare_examples.py", "function": "prepare_examples", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "summarize_text_content.jinja2",
"type": "prompt", "inputs": {"text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "summarize_text_content.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"summarize_text_content__variant_1.jinja2", "type": "prompt", "inputs": {"text":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "source": "summarize_text_content__variant_1.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"url": {"type": "string", "default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"is_chat_input": false}}, "outputs": {"category": {"type": "string", "reference":
"${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output":
false}, "evidence": {"type": "string", "reference": "${convert_to_dict.output.evidence}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name", "flowRunId":
"name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"url": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "b9ab955a-e7fa-4785-8ee2-77aeca5d8816",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '16239'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.389'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- text/plain, application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/cancel
response:
body:
string: ''
headers:
connection:
- keep-alive
content-length:
- '0'
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '0.465'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "fetch_text_content_from_url", "type":
"python", "source": {"type": "code", "path": "fetch_text_content_from_url.py"},
"inputs": {"fetch_url": "${inputs.url}"}, "tool": "fetch_text_content_from_url.py",
"reduce": false}, {"name": "prepare_examples", "type": "python", "source":
{"type": "code", "path": "prepare_examples.py"}, "inputs": {}, "tool": "prepare_examples.py",
"reduce": false}, {"name": "classify_with_llm", "type": "llm", "source": {"type":
"code", "path": "classify_with_llm.jinja2"}, "inputs": {"deployment_name":
"gpt-35-turbo", "suffix": "", "max_tokens": "128", "temperature": "0.1", "top_p":
"1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": "0",
"frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}",
"examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"},
"tool": "classify_with_llm.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"},
{"name": "convert_to_dict", "type": "python", "source": {"type": "code", "path":
"convert_to_dict.py"}, "inputs": {"input_str": "${classify_with_llm.output}"},
"tool": "convert_to_dict.py", "reduce": false}, {"name": "summarize_text_content",
"type": "llm", "source": {"type": "code", "path": "summarize_text_content.jinja2"},
"inputs": {"deployment_name": "gpt-35-turbo", "suffix": "", "max_tokens":
"128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False",
"stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of":
"1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "tool":
"summarize_text_content.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"}],
"tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs":
{"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "hate_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "self_harm_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "sexual_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text": {"type":
["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"description": "Use Azure Content Safety to detect harmful content.", "module":
"promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin":
true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "classify_with_llm.jinja2", "type":
"prompt", "inputs": {"examples": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text_content":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "classify_with_llm.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"convert_to_dict.py", "type": "python", "inputs": {"input_str": {"type": ["string"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"source": "convert_to_dict.py", "function": "convert_to_dict", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "fetch_text_content_from_url.py",
"type": "python", "inputs": {"fetch_url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "fetch_text_content_from_url.py",
"function": "fetch_text_content_from_url", "is_builtin": false, "enable_kwargs":
false, "tool_state": "stable"}, {"name": "prepare_examples.py", "type": "python",
"source": "prepare_examples.py", "function": "prepare_examples", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "summarize_text_content.jinja2",
"type": "prompt", "inputs": {"text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "summarize_text_content.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"summarize_text_content__variant_1.jinja2", "type": "prompt", "inputs": {"text":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "source": "summarize_text_content__variant_1.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"url": {"type": "string", "default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"is_chat_input": false}}, "outputs": {"category": {"type": "string", "reference":
"${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output":
false}, "evidence": {"type": "string", "reference": "${convert_to_dict.output.evidence}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name", "flowRunId":
"name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"url": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "b9ab955a-e7fa-4785-8ee2-77aeca5d8816",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '16239'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.305'
status:
code: 200
message: OK
- request:
body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true,
"selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705049145, "rootRunId": "name", "createdUtc":
"2024-01-12T08:45:45.0506879+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 4,
"statusRevision": 2, "runUuid": "87e3f885-8ac4-41e5-88ac-2f4c18c6b260", "parentRunUuid":
null, "rootRunUuid": "87e3f885-8ac4-41e5-88ac-2f4c18c6b260", "lastStartTimeUtc":
"2024-01-12T08:45:57.6980015+00:00", "currentComputeTime": "00:00:00", "computeDuration":
null, "effectiveStartTimeUtc": "2024-01-12T08:45:57.6980015+00:00", "lastModifiedBy":
{"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": null,
"userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:45:49.5053048+00:00", "duration":
null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId":
null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "status": "CancelRequested",
"startTimeUtc": "2024-01-12T08:45:57.6980015+00:00", "endTimeUtc": null, "scheduleId":
null, "displayName": "name", "name": null, "dataContainerId": "dcid.name",
"description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun",
"runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow",
"computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name":
"test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597",
"azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0",
"azureml.promptflow.node_variant": "${summarize_text_content.variant_0}",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/a1fa6ef1ead7ff3ce76b36250f6f5461/web_classification/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl",
"azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "_azureml.evaluation_run":
"promptflow.BatchRun", "azureml.promptflow.snapshot_id": "b9ab955a-e7fa-4785-8ee2-77aeca5d8816"},
"parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets":
[], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets":
[], "runDefinition": null, "jobSpecification": null, "primaryMetricName":
null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri":
null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition":
null, "jobSpecification": null, "systemSettings": null}'
headers:
connection:
- keep-alive
content-length:
- '4110'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.058'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_cancel_run.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_cancel_run.yaml",
"repo_id": "promptflow",
"token_count": 26473
} | 97 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.024'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.095'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.097'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.163'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:01:09 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '379'
content-md5:
- lI/pz9jzTQ7Td3RHPL7y7w==
content-type:
- application/octet-stream
last-modified:
- Mon, 06 Nov 2023 08:30:18 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Mon, 06 Nov 2023 08:30:18 GMT
x-ms-meta-name:
- 94331215-cf7f-452a-9f1a-1d276bc9b0e4
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 3f163752-edb0-4afc-a6f5-b0a670bd7c24
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:01:10 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.075'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.082'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:01:14 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/partial_fail/data.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '52'
content-md5:
- kHimciLnA7d3/I2LBUeLNA==
content-type:
- application/octet-stream
last-modified:
- Fri, 22 Sep 2023 09:37:31 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Fri, 22 Sep 2023 09:37:30 GMT
x-ms-meta-name:
- aa1844d8-4898-4daa-8100-6140558fc7c9
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:01:15 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/partial_fail/data.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/partial_fail/flow.dag.yaml",
"runId": "failed_run_name", "runDisplayName": "sdk-cli-test-fixture-failed-run",
"runExperimentName": "", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"},
"inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName":
"fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '782'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"failed_run_name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '6.976'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/failed_run_name/flowRuns/failed_run_name",
"flowRunId": "failed_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-failed-run",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/failed_run_name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "27175d15-f6d8-4792-9072-e2b684753205",
"studioPortalEndpoint": "https://ml.azure.com/runs/failed_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12855'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.388'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/failed_run_name/flowRuns/failed_run_name",
"flowRunId": "failed_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-failed-run",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/failed_run_name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "27175d15-f6d8-4792-9072-e2b684753205",
"studioPortalEndpoint": "https://ml.azure.com/runs/failed_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12855'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.261'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/failed_run_name/flowRuns/failed_run_name",
"flowRunId": "failed_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-failed-run",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/failed_run_name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "27175d15-f6d8-4792-9072-e2b684753205",
"studioPortalEndpoint": "https://ml.azure.com/runs/failed_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12855'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.411'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/failed_run_name/flowRuns/failed_run_name",
"flowRunId": "failed_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-failed-run",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/failed_run_name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "27175d15-f6d8-4792-9072-e2b684753205",
"studioPortalEndpoint": "https://ml.azure.com/runs/failed_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12855'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.328'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/failed_run_name/flowRuns/failed_run_name",
"flowRunId": "failed_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-failed-run",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/failed_run_name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "27175d15-f6d8-4792-9072-e2b684753205",
"studioPortalEndpoint": "https://ml.azure.com/runs/failed_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12855'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.367'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/failed_run_name/flowRuns/failed_run_name",
"flowRunId": "failed_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-failed-run",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/failed_run_name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "27175d15-f6d8-4792-9072-e2b684753205",
"studioPortalEndpoint": "https://ml.azure.com/runs/failed_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12855'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.252'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/failed_run_name/flowRuns/failed_run_name",
"flowRunId": "failed_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-failed-run",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/failed_run_name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "27175d15-f6d8-4792-9072-e2b684753205",
"studioPortalEndpoint": "https://ml.azure.com/runs/failed_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12855'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.289'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/failed_run_name/flowRuns/failed_run_name",
"flowRunId": "failed_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-failed-run",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/failed_run_name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "27175d15-f6d8-4792-9072-e2b684753205",
"studioPortalEndpoint": "https://ml.azure.com/runs/failed_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12855'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.461'
status:
code: 200
message: OK
- request:
body: '{"runId": "failed_run_name", "selectRunMetadata": true, "selectRunDefinition":
true, "selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705046481, "rootRunId": "failed_run_name",
"createdUtc": "2024-01-12T08:01:21.0459935+00:00", "createdBy": {"userObjectId":
"00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": {"error": {"code": "UserError", "severity":
null, "message": "The input for batch run is incorrect. Couldn''t find these
mapping relations: ${data.key}. Please make sure your input mapping keys and
values match your YAML input section and input data. For more information,
refer to the following documentation: https://aka.ms/pf/column-mapping", "messageFormat":
"The input for batch run is incorrect. Couldn''t find these mapping relations:
{invalid_relations}. Please make sure your input mapping keys and values match
your YAML input section and input data. For more information, refer to the
following documentation: https://aka.ms/pf/column-mapping", "messageParameters":
{"invalid_relations": "${data.key}"}, "referenceCode": "Executor", "detailsUri":
null, "target": null, "details": [], "innerError": {"code": "ValidationError",
"innerError": {"code": "InputMappingError", "innerError": null}}, "debugInfo":
{"type": "InputMappingError", "message": "The input for batch run is incorrect.
Couldn''t find these mapping relations: ${data.key}. Please make sure your
input mapping keys and values match your YAML input section and input data.
For more information, refer to the following documentation: https://aka.ms/pf/column-mapping",
"stackTrace": "Traceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/runtime/runtime.py\",
line 671, in execute_bulk_run_request\n batch_engine.run(\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_engine.py\",
line 147, in run\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_engine.py\",
line 132, in run\n batch_inputs = batch_input_processor.process_batch_inputs(input_dirs,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 41, in process_batch_inputs\n return self._validate_and_apply_inputs_mapping(input_dicts,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 91, in _validate_and_apply_inputs_mapping\n resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 163, in _apply_inputs_mapping_for_all_lines\n result = [apply_inputs_mapping(item,
inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 163, in <listcomp>\n result = [apply_inputs_mapping(item, inputs_mapping)
for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 292, in apply_inputs_mapping\n raise InputMappingError(\n", "innerException":
null, "data": null, "errorResponse": null}, "additionalInfo": null}, "correlation":
null, "environment": null, "location": null, "time": "2024-01-12T08:01:41.930978+00:00",
"componentName": "promptflow-runtime/20231204.v4 Designer/1.0 promptflow-sdk/0.0.1
azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13
(Windows-10-10.0.22631-SP0) promptflow/1.2.0rc1"}, "warnings": null, "revision":
7, "statusRevision": 3, "runUuid": "ebad9732-07a7-434c-b7fb-637162729eb8",
"parentRunUuid": null, "rootRunUuid": "ebad9732-07a7-434c-b7fb-637162729eb8",
"lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:01.7835699",
"effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:01:41.5560578+00:00", "duration":
"00:00:01.7835699", "cancelationReason": null, "currentAttemptId": 1, "runId":
"failed_run_name", "parentRunId": null, "experimentId": "1848033e-509f-4c52-92ee-f0a0121fe99e",
"status": "Failed", "startTimeUtc": "2024-01-12T08:01:40.3861186+00:00", "endTimeUtc":
"2024-01-12T08:01:42.1696885+00:00", "scheduleId": null, "displayName": "sdk-cli-test-fixture-failed-run",
"name": null, "dataContainerId": "dcid.failed_run_name", "description": null,
"hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator":
null, "traits": [], "attribution": "PromptFlow", "computeType": "AmlcDsi"},
"properties": {"azureml.promptflow.runtime_name": "test-runtime-ci", "azureml.promptflow.runtime_version":
"20231204.v4", "azureml.promptflow.definition_file_name": "flow.dag.yaml",
"azureml.promptflow.session_id": "31858a8dfc61a642bb0ab6df4fc3ac7b3807de4ffead00d1",
"azureml.promptflow.flow_lineage_id": "de293df4f50622090c0225852d59cd663b6b629e38728f7444fa0f12255a0647",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/bc20fa079592a8072922533f187e3184/partial_fail/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl",
"_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.snapshot_id":
"27175d15-f6d8-4792-9072-e2b684753205", "azureml.promptflow.total_tokens":
"0", "_azureml.evaluate_artifacts": "[{\"path\": \"instance_results.jsonl\",
\"type\": \"table\"}]"}, "parameters": {}, "actionUris": {}, "scriptName":
null, "target": null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings":
{}, "services": {}, "inputDatasets": [], "outputDatasets": [], "runDefinition":
null, "jobSpecification": null, "primaryMetricName": null, "createdFrom":
null, "cancelUri": null, "completeUri": null, "diagnosticsUri": null, "computeRequest":
null, "compute": null, "retainForLifetimeOfWorkspace": false, "queueingInfo":
null, "inputs": null, "outputs": {"debug_info": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_failed_run_name_output_data_debug_info/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '7988'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.048'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/failed_run_name/logContent
response:
body:
string: '"2024-01-12 08:01:25 +0000 49 promptflow-runtime INFO [failed_run_name]
Receiving v2 bulk run request 74219027-a510-47c5-b30f-f9a2e05d3f12: {\"flow_id\":
\"failed_run_name\", \"flow_run_id\": \"failed_run_name\", \"flow_source\":
{\"flow_source_type\": 1, \"flow_source_info\": {\"snapshot_id\": \"27175d15-f6d8-4792-9072-e2b684753205\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.failed_run_name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A42%3A25Z&ske=2024-01-13T15%3A52%3A25Z&sks=b&skv=2019-07-07&st=2024-01-12T07%3A51%3A24Z&se=2024-01-12T16%3A01%3A24Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl\"},
\"azure_storage_setting\": {\"azure_storage_mode\": 1, \"storage_account_name\":
\"promptfloweast4063704120\", \"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/failed_run_name\",
\"blob_container_sas_token\": \"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A01%3A25Z&ske=2024-01-19T08%3A01%3A25Z&sks=b&skv=2019-07-07&se=2024-01-19T08%3A01%3A25Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 08:01:25 +0000 49
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:01:25 +0000 49 promptflow-runtime INFO Updating
failed_run_name to Status.Preparing...\n2024-01-12 08:01:25 +0000 49
promptflow-runtime INFO Downloading snapshot to /mnt/host/service/app/39649/requests/failed_run_name\n2024-01-12
08:01:25 +0000 49 promptflow-runtime INFO Get snapshot sas url for
27175d15-f6d8-4792-9072-e2b684753205...\n2024-01-12 08:01:32 +0000 49
promptflow-runtime INFO Downloading snapshot 27175d15-f6d8-4792-9072-e2b684753205
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/27175d15-f6d8-4792-9072-e2b684753205.zip...\n2024-01-12
08:01:32 +0000 49 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39649/requests/failed_run_name/27175d15-f6d8-4792-9072-e2b684753205.zip
with size 701 for snapshot 27175d15-f6d8-4792-9072-e2b684753205.\n2024-01-12
08:01:32 +0000 49 promptflow-runtime INFO Download snapshot 27175d15-f6d8-4792-9072-e2b684753205
completed.\n2024-01-12 08:01:32 +0000 49 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/39649/requests/failed_run_name\n2024-01-12
08:01:32 +0000 49 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 08:01:32 +0000 49 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 08:01:32 +0000 49 promptflow-runtime
INFO Starting to check process 3429 status for run failed_run_name\n2024-01-12
08:01:32 +0000 49 promptflow-runtime INFO Start checking run status
for run failed_run_name\n2024-01-12 08:01:36 +0000 3429 promptflow-runtime
INFO [49--3429] Start processing flowV2......\n2024-01-12 08:01:36 +0000 3429
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:01:36 +0000 3429 promptflow-runtime INFO Setting
mlflow tracking uri...\n2024-01-12 08:01:36 +0000 3429 promptflow-runtime
INFO Validating ''AzureML Data Scientist'' user authentication...\n2024-01-12
08:01:36 +0000 3429 promptflow-runtime INFO Successfully validated
''AzureML Data Scientist'' user authentication.\n2024-01-12 08:01:36 +0000 3429
promptflow-runtime INFO Using AzureMLRunStorageV2\n2024-01-12 08:01:36
+0000 3429 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:01:37 +0000 3429 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 08:01:37 +0000 3429 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:01:40 +0000 3429 promptflow-runtime INFO Resolve data from url finished
in 2.9352363562211394 seconds\n2024-01-12 08:01:40 +0000 3429 promptflow-runtime
INFO Starting the aml run ''failed_run_name''...\n2024-01-12 08:01:40
+0000 3429 execution WARNING Starting run without column mapping
may lead to unexpected results. Please consult the following documentation
for more information: https://aka.ms/pf/column-mapping\n2024-01-12 08:01:40
+0000 3429 execution.bulk ERROR Error occurred while executing batch
run. Exception: The input for batch run is incorrect. Couldn''t find these
mapping relations: ${data.key}. Please make sure your input mapping keys and
values match your YAML input section and input data. For more information,
refer to the following documentation: https://aka.ms/pf/column-mapping\n2024-01-12
08:01:40 +0000 3429 promptflow-runtime ERROR Run failed_run_name failed.
Exception: {\n \"message\": \"The input for batch run is incorrect. Couldn''t
find these mapping relations: ${data.key}. Please make sure your input mapping
keys and values match your YAML input section and input data. For more information,
refer to the following documentation: https://aka.ms/pf/column-mapping\",\n \"messageFormat\":
\"The input for batch run is incorrect. Couldn''t find these mapping relations:
{invalid_relations}. Please make sure your input mapping keys and values match
your YAML input section and input data. For more information, refer to the
following documentation: https://aka.ms/pf/column-mapping\",\n \"messageParameters\":
{\n \"invalid_relations\": \"${data.key}\"\n },\n \"referenceCode\":
\"Executor\",\n \"code\": \"UserError\",\n \"innerError\": {\n \"code\":
\"ValidationError\",\n \"innerError\": {\n \"code\": \"InputMappingError\",\n \"innerError\":
null\n }\n },\n \"debugInfo\": {\n \"type\": \"InputMappingError\",\n \"message\":
\"The input for batch run is incorrect. Couldn''t find these mapping relations:
${data.key}. Please make sure your input mapping keys and values match your
YAML input section and input data. For more information, refer to the following
documentation: https://aka.ms/pf/column-mapping\",\n \"stackTrace\": \"Traceback
(most recent call last):\\n File \\\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/runtime/runtime.py\\\",
line 671, in execute_bulk_run_request\\n batch_engine.run(\\n File \\\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_engine.py\\\",
line 147, in run\\n raise e\\n File \\\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_engine.py\\\",
line 132, in run\\n batch_inputs = batch_input_processor.process_batch_inputs(input_dirs,
inputs_mapping)\\n File \\\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\\\",
line 41, in process_batch_inputs\\n return self._validate_and_apply_inputs_mapping(input_dicts,
inputs_mapping)\\n File \\\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\\\",
line 91, in _validate_and_apply_inputs_mapping\\n resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs,
inputs_mapping)\\n File \\\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\\\",
line 163, in _apply_inputs_mapping_for_all_lines\\n result = [apply_inputs_mapping(item,
inputs_mapping) for item in merged_list]\\n File \\\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\\\",
line 163, in <listcomp>\\n result = [apply_inputs_mapping(item, inputs_mapping)
for item in merged_list]\\n File \\\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\\\",
line 292, in apply_inputs_mapping\\n raise InputMappingError(\\n\",\n \"innerException\":
null\n }\n}\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/runtime/runtime.py\",
line 671, in execute_bulk_run_request\n batch_engine.run(\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_engine.py\",
line 147, in run\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_engine.py\",
line 132, in run\n batch_inputs = batch_input_processor.process_batch_inputs(input_dirs,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 41, in process_batch_inputs\n return self._validate_and_apply_inputs_mapping(input_dicts,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 91, in _validate_and_apply_inputs_mapping\n resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 163, in _apply_inputs_mapping_for_all_lines\n result = [apply_inputs_mapping(item,
inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 163, in <listcomp>\n result = [apply_inputs_mapping(item, inputs_mapping)
for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 292, in apply_inputs_mapping\n raise InputMappingError(\npromptflow.batch._errors.InputMappingError:
The input for batch run is incorrect. Couldn''t find these mapping relations:
${data.key}. Please make sure your input mapping keys and values match your
YAML input section and input data. For more information, refer to the following
documentation: https://aka.ms/pf/column-mapping\n2024-01-12 08:01:41 +0000 3429
execution.bulk INFO Upload status summary metrics for run failed_run_name
finished in 0.7801888957619667 seconds\n2024-01-12 08:01:41 +0000 3429
promptflow-runtime INFO Successfully write run properties {\"azureml.promptflow.total_tokens\":
0, \"_azureml.evaluate_artifacts\": \"[{\\\"path\\\": \\\"instance_results.jsonl\\\",
\\\"type\\\": \\\"table\\\"}]\"} with run id ''failed_run_name''\n2024-01-12
08:01:41 +0000 3429 execution.bulk INFO Upload RH properties for
run failed_run_name finished in 0.08418271783739328 seconds\n2024-01-12 08:01:41
+0000 3429 promptflow-runtime INFO Creating unregistered output Asset
for Run failed_run_name...\n2024-01-12 08:01:41 +0000 3429 promptflow-runtime
INFO Created debug_info Asset: azureml://locations/eastus/workspaces/00000/data/azureml_failed_run_name_output_data_debug_info/versions/1\n2024-01-12
08:01:41 +0000 3429 promptflow-runtime INFO Patching failed_run_name...\n2024-01-12
08:01:41 +0000 3429 promptflow-runtime WARNING [failed_run_name] Run failed.
Execution stackTrace: Traceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/runtime/runtime.py\",
line 671, in execute_bulk_run_request\n batch_engine.run(\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_engine.py\",
line 147, in run\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_engine.py\",
line 132, in run\n batch_inputs = batch_input_processor.process_batch_inputs(input_dirs,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 41, in process_batch_inputs\n return self._validate_and_apply_inputs_mapping(input_dicts,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 91, in _validate_and_apply_inputs_mapping\n resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs,
inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 163, in _apply_inputs_mapping_for_all_lines\n result = [apply_inputs_mapping(item,
inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 163, in <listcomp>\n result = [apply_inputs_mapping(item, inputs_mapping)
for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/batch/_batch_inputs_processor.py\",
line 292, in apply_inputs_mapping\n raise InputMappingError(\n\n2024-01-12
08:01:42 +0000 3429 promptflow-runtime INFO Ending the aml run ''failed_run_name''
with status ''Failed''...\n2024-01-12 08:01:43 +0000 49 promptflow-runtime
INFO Process 3429 finished\n2024-01-12 08:01:43 +0000 49 promptflow-runtime
INFO [49] Child process finished!\n2024-01-12 08:01:43 +0000 49 promptflow-runtime
INFO [failed_run_name] End processing bulk run\n2024-01-12 08:01:43 +0000 49
promptflow-runtime ERROR Submit flow request failed Code: 400 InnerException
type: InputMappingError Exception type hierarchy: UserError/ValidationError/InputMappingError\n2024-01-12
08:01:43 +0000 49 promptflow-runtime INFO Cleanup working dir /mnt/host/service/app/39649/requests/failed_run_name
for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '15117'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.766'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_failed_run_logs.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_failed_run_logs.yaml",
"repo_id": "promptflow",
"token_count": 70066
} | 98 |
flow: ../flows/web_classification
data: ../datas/webClassification1.jsonl
column_mapping:
url: "${data.url}"
variant: ${summarize_text_content.variant_0}
# run config: env related
environment_variables: env_file
connections:
classify_with_llm:
connection: new_ai_connection
# model is also supported for openai connection
model: test_model | promptflow/src/promptflow/tests/test_configs/runs/run_with_connections_model.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/run_with_connections_model.yaml",
"repo_id": "promptflow",
"token_count": 131
} | 99 |
import bs4
import requests
from promptflow import tool
@tool
def fetch_text_content_from_url(url: str):
    """Fetch a web page and return up to the first 2000 characters of its visible text.

    Sends the request with a desktop-browser User-Agent so sites that reject the
    default client UA still respond. On a non-200 status or any request error the
    function prints a diagnostic and returns the fallback string
    "No available content" instead of raising, so downstream flow nodes always
    receive a string.

    :param url: The URL to fetch.
    :return: Extracted page text (truncated to 2000 chars) or "No available content".
    """
    # Send a request to the URL
    try:
        # Impersonate a desktop browser; some sites block the default requests UA.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup and extract plain text.
            # (A previous version called soup.prettify() here and discarded the
            # result; prettify() only returns a string and has no side effects,
            # so the call was dead code and has been removed.)
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
| promptflow/src/promptflow/tests/test_configs/runs/web_classification_variant_0_20231205_120253_104100/snapshot/fetch_text_content_from_url.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/web_classification_variant_0_20231205_120253_104100/snapshot/fetch_text_content_from_url.py",
"repo_id": "promptflow",
"token_count": 484
} | 100 |
from pathlib import Path
import importlib.util
from promptflow import PFClient
package_name = "tool_package"
def list_package_tools(raise_error=False):
    """
    List the meta of all tools in the package.

    The returned dict maps each tool's module name to that tool's meta data.

    :param raise_error: If True, propagate errors encountered while collecting
        tool meta instead of suppressing them.
    :return: Dict mapping tool module name -> tool meta data.
    """
    # This function is auto generated by pf CLI, please do not modify manually.
    pf_client = PFClient()
    # NOTE(review): relies on private SDK APIs (_tools._list_tools_in_package),
    # which may change between promptflow versions — confirm on upgrade.
    # (The previous `tools = {}` initialization was dead code: it was always
    # overwritten by the call below, so it has been removed.)
    return pf_client._tools._list_tools_in_package(package_name, raise_error=raise_error)
return tools | promptflow/src/promptflow/tests/test_configs/tools/tool_package/tool_package/utils.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/tools/tool_package/tool_package/utils.py",
"repo_id": "promptflow",
"token_count": 172
} | 101 |
inputs:
num:
type: int
outputs:
content:
type: string
reference: ${divide_num.output}
nodes:
- name: divide_num
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${inputs.num}
activate:
when: ${inputs.num} > 0 | promptflow/src/promptflow/tests/test_configs/wrong_flows/invalid_activate_config/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/invalid_activate_config/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 115
} | 102 |
inputs:
num:
type: int
outputs:
content:
type: string
reference: ${another_stringify_num.output}
nodes:
- name: stringify_num
type: python
source:
type: code
path: stringify_num.py
inputs:
num: ${inputs.num}
| promptflow/src/promptflow/tests/test_configs/wrong_flows/outputs_reference_not_valid/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/outputs_reference_not_valid/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 103
} | 103 |
inputs:
text:
type: string
outputs:
output:
type: string
reference: ${search_by_text.output.search_metadata}
nodes:
- name: search_by_text
type: python
source:
type: package
tool: promptflow.tools.serpapi.SerpAPI.search_11
inputs:
connection: serp_connection
query: ${inputs.text}
num: 1 | promptflow/src/promptflow/tests/test_configs/wrong_flows/wrong_tool_in_package_tools/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/wrong_tool_in_package_tools/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 132
} | 104 |
# Manage flows
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../../how-to-guides/faq.md#stable-vs-experimental).
:::
This documentation will walk you through how to manage your flow with CLI and SDK on [Azure AI](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/overview-what-is-prompt-flow?view=azureml-api-2).
The flow examples in this guide come from [examples/flows/standard](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard).
In general:
- For `CLI`, you can run `pfazure flow --help` in the terminal to see help messages.
- For `SDK`, you can refer to [Promptflow Python Library Reference](../../reference/python-library-reference/promptflow.md) and check `promptflow.azure.PFClient.flows` for more flow operations.
:::{admonition} Prerequisites
- Refer to the prerequisites in [Quick start](./quick-start.md#prerequisites).
- Use the `az login` command in the command line to log in. This enables promptflow to access your credentials.
:::
Let's take a look at the following topics:
- [Manage flows](#manage-flows)
- [Create a flow](#create-a-flow)
- [List flows](#list-flows)
## Create a flow
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
To set the target workspace, you can either specify it in the CLI command or set a default value in the Azure CLI.
You can refer to [Quick start](./quick-start.md#submit-a-run-to-workspace) for more information.
To create a flow on Azure from a local flow directory, you can use
```bash
# create the flow
pfazure flow create --flow <path-to-flow-folder>
# create the flow with metadata
pfazure flow create --flow <path-to-flow-folder> --set display_name=<display-name> description=<description> tags.key1=value1
```
After the flow is created successfully, you can see the flow summary in the command line.
![img](../../media/cloud/manage-flows/flow_create_0.png)
:::
:::{tab-item} SDK
:sync: SDK
1. Import the required libraries
```python
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
# azure version promptflow apis
from promptflow.azure import PFClient
```
2. Get credential
```python
try:
credential = DefaultAzureCredential()
# Check if given credential can get token successfully.
credential.get_token("https://management.azure.com/.default")
except Exception as ex:
# Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work
credential = InteractiveBrowserCredential()
```
3. Get a handle to the workspace
```python
# Get a handle to workspace
pf = PFClient(
credential=credential,
subscription_id="<SUBSCRIPTION_ID>", # this will look like xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name="<RESOURCE_GROUP>",
workspace_name="<AML_WORKSPACE_NAME>",
)
```
4. Create the flow
```python
# specify flow path
flow = "./web-classification"
# create flow to Azure
flow = pf.flows.create_or_update(
flow=flow, # path to the flow folder
display_name="my-web-classification", # it will be "web-classification-{timestamp}" if not specified
type="standard", # it will be "standard" if not specified
)
```
:::
::::
On Azure portal, you can see the created flow in the flow list.
![img](../../media/cloud/manage-flows/flow_create_1.png)
And the flow source folder on file share is `Users/<alias>/promptflow/<flow-display-name>`:
![img](../../media/cloud/manage-flows/flow_create_2.png)
Note that if the flow display name is not specified, it will default to the flow folder name + timestamp. (e.g. `web-classification-11-13-2023-14-19-10`)
## List flows
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
List flows with default json format:
```bash
pfazure flow list --max-results 1
```
![img](../../media/cloud/manage-flows/flow_list_0.png)
:::
:::{tab-item} SDK
:sync: SDK
```python
# reuse the pf client created in "create a flow" section
flows = pf.flows.list(max_results=1)
```
:::
:::: | promptflow/docs/cloud/azureai/manage-flows.md/0 | {
"file_path": "promptflow/docs/cloud/azureai/manage-flows.md",
"repo_id": "promptflow",
"token_count": 1304
} | 0 |
# Deploy a flow using Docker
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental).
:::
There are two steps to deploy a flow using docker:
1. Build the flow as docker format.
2. Build and run the docker image.
## Build a flow as docker format
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
Use the command below to build a flow as docker format:
```bash
pf flow build --source <path-to-your-flow-folder> --output <your-output-dir> --format docker
```
:::
:::{tab-item} VS Code Extension
:sync: VSC
In visual editor, choose:
![img](../../media/how-to-guides/vscode_export.png)
Click the button below to build a flow as docker format:
![img](../../media/how-to-guides/vscode_export_as_docker.png)
:::
::::
Note that all dependent connections must be created before exporting as docker.
### Docker format folder structure
Exported Dockerfile & its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files
- ...
- connections: the folder contains yaml files to create all related connections
- ...
- Dockerfile: the dockerfile to build the image
- start.sh: the script used in `CMD` of `Dockerfile` to start the service
- runit: the folder contains all the runit scripts
- ...
- settings.json: a json file to store the settings of the docker image
- README.md: Simple introduction of the files
## Deploy with Docker
We are going to use the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/) as
an example to show how to deploy with docker.
Please ensure you have [create the connection](../manage-connections.md#create-a-connection) required by flow, if not, you could
refer to [Setup connection for web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification).
## Build a flow as docker format app
Use the command below to build a flow as docker format app:
```bash
pf flow build --source ../../flows/standard/web-classification --output dist --format docker
```
Note that all dependent connections must be created before exporting as docker.
### Build Docker image
Like other Dockerfile, you need to build the image first. You can tag the image with any name you want. In this example, we use `promptflow-serve`.
Run the command below to build image:
```bash
docker build dist -t web-classification-serve
```
### Run Docker image
Run the docker image will start a service to serve the flow inside the container.
#### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in containers.
Secrets in connections won't be exported directly. Instead, we will export them as a reference to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
You'll need to set up the environment variables in the container to make the connections work.
### Run with `docker run`
You can run the docker image directly with the commands below:
```bash
# The started service will listen on port 8080. You can map the port to any port on the host machine as needed.
docker run -p 8080:8080 -e OPEN_AI_CONNECTION_API_KEY=<secret-value> web-classification-serve
```
### Test the endpoint
After starting the service, you can use curl to test it:
```bash
curl http://localhost:8080/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
## Next steps
- Try the example [here](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-deploy/docker).
- See how to [deploy a flow using kubernetes](deploy-using-kubernetes.md).
| promptflow/docs/how-to-guides/deploy-a-flow/deploy-using-docker.md/0 | {
"file_path": "promptflow/docs/how-to-guides/deploy-a-flow/deploy-using-docker.md",
"repo_id": "promptflow",
"token_count": 1142
} | 1 |
# Develop a tool
We provide guides on how to develop a tool and use it.
```{toctree}
:maxdepth: 1
:hidden:
create-and-use-tool-package
add-a-tool-icon
add-category-and-tags-for-tool
use-file-path-as-tool-input
customize_an_llm_tool
create-cascading-tool-inputs
create-your-own-custom-strong-type-connection
create-dynamic-list-tool-input
```
| promptflow/docs/how-to-guides/develop-a-tool/index.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-tool/index.md",
"repo_id": "promptflow",
"token_count": 130
} | 2 |
# Integrations
The Integrations section contains documentation on custom extensions created by the community that expand prompt flow's capabilities.
These include tools that enrich flows, as well as tutorials on innovative ways to use prompt flow.
```{toctree}
:maxdepth: 1
tools/index
llms/index
``` | promptflow/docs/integrations/index.md/0 | {
"file_path": "promptflow/docs/integrations/index.md",
"repo_id": "promptflow",
"token_count": 74
} | 3 |
# Prompt
## Introduction
The Prompt Tool in PromptFlow offers a collection of textual templates that serve as a starting point for creating prompts.
These templates, based on the Jinja2 template engine, facilitate the definition of prompts. The tool proves useful
when prompt tuning is required prior to feeding the prompts into the Language Model (LLM) model in PromptFlow.
## Inputs
| Name | Type | Description | Required |
|--------------------|--------|----------------------------------------------------------|----------|
| prompt | string | The prompt template in Jinja | Yes |
| Inputs | - | List of variables of prompt template and its assignments | - |
## Outputs
The prompt text parsed from the prompt + Inputs
## How to write Prompt?
1. Prepare jinja template. Learn more about [Jinja](https://jinja.palletsprojects.com/en/3.1.x/)
_In below example, the prompt incorporates Jinja templating syntax to dynamically generate the welcome message and personalize it based on the user's name. It also presents a menu of options for the user to choose from. Depending on whether the user_name variable is provided, it either addresses the user by name or uses a generic greeting._
```jinja
Welcome to {{ website_name }}!
{% if user_name %}
Hello, {{ user_name }}!
{% else %}
Hello there!
{% endif %}
Please select an option from the menu below:
1. View your account
2. Update personal information
3. Browse available products
4. Contact customer support
```
2. Assign value for the variables.
_In above example, two variables would be automatically detected and listed in '**Inputs**' section. Please assign values._
### Sample 1
Inputs
| Variable | Type | Sample Value |
|---------------|--------|--------------|
| website_name | string | "Microsoft" |
| user_name | string | "Jane" |
Outputs
```
Welcome to Microsoft! Hello, Jane! Please select an option from the menu below: 1. View your account 2. Update personal information 3. Browse available products 4. Contact customer support
```
### Sample 2
Inputs
| Variable | Type | Sample Value |
|--------------|--------|----------------|
| website_name | string | "Bing" |
| user_name    | string | ""             |
Outputs
```
Welcome to Bing! Hello there! Please select an option from the menu below: 1. View your account 2. Update personal information 3. Browse available products 4. Contact customer support
``` | promptflow/docs/reference/tools-reference/prompt-tool.md/0 | {
"file_path": "promptflow/docs/reference/tools-reference/prompt-tool.md",
"repo_id": "promptflow",
"token_count": 783
} | 4 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
python_requirements_txt: requirements.txt
inputs:
chat_history:
type: list
is_chat_history: true
default: []
question:
type: string
is_chat_input: true
default: '1+1=?'
outputs:
answer:
type: string
reference: ${extract_result.output}
is_chat_output: true
nodes:
- name: chat
use_variants: true
- name: extract_result
type: python
source:
type: code
path: extract_result.py
inputs:
input1: ${chat.output}
node_variants:
chat:
default_variant_id: variant_0
variants:
variant_0:
node:
type: llm
source:
type: code
path: chat.jinja2
inputs:
deployment_name: gpt-4
max_tokens: 256
temperature: 0
chat_history: ${inputs.chat_history}
question: ${inputs.question}
model: gpt-4
connection: open_ai_connection
api: chat
variant_1:
node:
type: llm
source:
type: code
path: chat_variant_1.jinja2
inputs:
deployment_name: gpt-4
max_tokens: 256
temperature: 0
chat_history: ${inputs.chat_history}
question: ${inputs.question}
model: gpt-4
connection: open_ai_connection
api: chat
variant_2:
node:
type: llm
source:
type: code
path: chat_variant_2.jinja2
inputs:
deployment_name: gpt-4
max_tokens: 256
temperature: 0
chat_history: ${inputs.chat_history}
question: ${inputs.question}
model: gpt-4
connection: open_ai_connection
api: chat
| promptflow/examples/flows/chat/chat-math-variant/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/chat/chat-math-variant/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 977
} | 5 |
import PyPDF2
import faiss
import os
from pathlib import Path
from utils.oai import OAIEmbedding
from utils.index import FAISSIndex
from utils.logging import log
from utils.lock import acquire_lock
from constants import INDEX_DIR
def create_faiss_index(pdf_path: str) -> str:
    """Build (or reuse) a persisted FAISS index over the text of a PDF.

    The index folder name encodes the source file name plus the chunking
    parameters, so different CHUNK_SIZE/CHUNK_OVERLAP settings get separate
    indexes. Creation is guarded by a file lock so concurrent callers for the
    same PDF/settings build the index only once.

    :param pdf_path: Path to the source PDF file.
    :return: POSIX path of the folder containing the persisted FAISS index.
    """
    # Chunk settings come from the environment; both must be set or int(None)
    # raises TypeError here.
    chunk_size = int(os.environ.get("CHUNK_SIZE"))
    chunk_overlap = int(os.environ.get("CHUNK_OVERLAP"))
    log(f"Chunk size: {chunk_size}, chunk overlap: {chunk_overlap}")

    # Persist location: INDEX_DIR/<pdf-name>.index_<size>_<overlap>
    file_name = Path(pdf_path).name + f".index_{chunk_size}_{chunk_overlap}"
    index_persistent_path = Path(INDEX_DIR) / file_name
    index_persistent_path = index_persistent_path.resolve().as_posix()
    lock_path = index_persistent_path + ".lock"
    log("Index path: " + os.path.abspath(index_persistent_path))

    # Lock around the existence check + build so only one process builds.
    with acquire_lock(lock_path):
        # "index.faiss" is the marker file FAISSIndex.save writes.
        if os.path.exists(os.path.join(index_persistent_path, "index.faiss")):
            log("Index already exists, bypassing index creation")
            return index_persistent_path
        else:
            if not os.path.exists(index_persistent_path):
                os.makedirs(index_persistent_path)

        log("Building index")
        # Concatenate text of all pages before chunking.
        pdf_reader = PyPDF2.PdfReader(pdf_path)
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text()

        # Chunk the words into segments of X words with Y-word overlap, X=CHUNK_SIZE, Y=OVERLAP_SIZE
        segments = split_text(text, chunk_size, chunk_overlap)

        log(f"Number of segments: {len(segments)}")

        # 1536 is the embedding dimension; presumably matches OAIEmbedding's
        # model (e.g. text-embedding-ada-002) — TODO confirm.
        index = FAISSIndex(index=faiss.IndexFlatL2(1536), embedding=OAIEmbedding())
        index.insert_batch(segments)

        index.save(index_persistent_path)

        log("Index built: " + index_persistent_path)
        return index_persistent_path
# Split the text into chunks with CHUNK_SIZE and CHUNK_OVERLAP as character count
def split_text(text, chunk_size, chunk_overlap):
# Calculate the number of chunks
num_chunks = (len(text) - chunk_overlap) // (chunk_size - chunk_overlap)
# Split the text into chunks
chunks = []
for i in range(num_chunks):
start = i * (chunk_size - chunk_overlap)
end = start + chunk_size
chunks.append(text[start:end])
# Add the last chunk
chunks.append(text[num_chunks * (chunk_size - chunk_overlap):])
return chunks
| promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/build_index.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/build_index.py",
"repo_id": "promptflow",
"token_count": 947
} | 6 |
from promptflow import tool
from chat_with_pdf.main import chat_with_pdf
@tool
def chat_with_pdf_tool(question: str, pdf_url: str, history: list, ready: str):
    """Prompt-flow tool wrapper around chat_with_pdf.

    Converts the flow-style chat history to ChatML messages, runs the chat,
    drains the streamed answer into a single string, and returns answer plus
    retrieval context. The ``ready`` input is unused here; presumably it only
    sequences this node after setup in the DAG — TODO confirm.
    """
    history = convert_chat_history_to_chatml_messages(history)
    stream, context = chat_with_pdf(question, pdf_url, history)
    # Join the streamed tokens in one linear pass (the original loop shadowed
    # the builtin `str` and concatenated quadratically).
    answer = "".join(stream)
    return {"answer": answer, "context": context}
def convert_chat_history_to_chatml_messages(history):
    """Flatten prompt-flow chat history into an ordered ChatML message list.

    Each history turn ({"inputs": {"question": ...}, "outputs": {"answer": ...}})
    becomes a user message followed by an assistant message.
    """
    messages = []
    for turn in history:
        messages.extend(
            (
                {"role": "user", "content": turn["inputs"]["question"]},
                {"role": "assistant", "content": turn["outputs"]["answer"]},
            )
        )
    return messages
def convert_chatml_messages_to_chat_history(messages):
    """Rebuild prompt-flow chat history from a flat ChatML message list.

    Messages are consumed in (user, assistant) pairs; an odd-length list
    raises IndexError on the dangling user message, as before.
    """
    history = []
    idx = 0
    while idx < len(messages):
        history.append(
            {
                "inputs": {"question": messages[idx]["content"]},
                "outputs": {"answer": messages[idx + 1]["content"]},
            }
        )
        idx += 2
    return history
| promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf_tool.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf_tool.py",
"repo_id": "promptflow",
"token_count": 409
} | 7 |
import unittest
import promptflow.azure as azure
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
from base_test import BaseTest
import os
from promptflow._sdk._errors import InvalidRunStatusError
class TestChatWithPDFAzure(BaseTest):
    """End-to-end tests for the chat-with-pdf flow against an Azure ML workspace.

    These tests submit real batch and evaluation runs through
    promptflow.azure.PFClient, so they require valid Azure credentials and a
    workspace config discoverable by ``PFClient.from_config``.
    """

    def setUp(self):
        """Resolve the 3-line QnA test data and build an authenticated client."""
        super().setUp()
        self.data_path = os.path.join(
            self.flow_path, "data/bert-paper-qna-3-line.jsonl"
        )

        try:
            credential = DefaultAzureCredential()
            # Check if given credential can get token successfully.
            credential.get_token("https://management.azure.com/.default")
        except Exception:
            # Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work
            credential = InteractiveBrowserCredential()
        self.pf = azure.PFClient.from_config(credential=credential)

    def tearDown(self) -> None:
        return super().tearDown()

    def test_bulk_run_chat_with_pdf(self):
        """A batch run over 3 QnA lines completes with one detail row per line."""
        run = self.create_chat_run(display_name="chat_with_pdf_batch_run")
        self.pf.stream(run)  # wait for completion

        self.assertEqual(run.status, "Completed")

        details = self.pf.get_details(run)
        self.assertEqual(details.shape[0], 3)

    def test_eval(self):
        """Run the flow plus groundedness/perceived-intelligence evals under
        both the 2k- and 3k-context configs and sanity-check every run."""
        run_2k, eval_groundedness_2k, eval_pi_2k = self.run_eval_with_config(
            self.config_2k_context,
            display_name="chat_with_pdf_2k_context",
        )
        run_3k, eval_groundedness_3k, eval_pi_3k = self.run_eval_with_config(
            self.config_3k_context,
            display_name="chat_with_pdf_3k_context",
        )

        self.check_run_basics(run_2k)
        self.check_run_basics(run_3k)
        self.check_run_basics(eval_groundedness_2k)
        self.check_run_basics(eval_pi_2k)
        self.check_run_basics(eval_groundedness_3k)
        self.check_run_basics(eval_pi_3k)

    def test_bulk_run_valid_mapping(self):
        """A fully-specified column mapping over 1-line data completes."""
        data = os.path.join(self.flow_path, "data/bert-paper-qna-1-line.jsonl")
        run = self.create_chat_run(
            data=data,
            column_mapping={
                "question": "${data.question}",
                "pdf_url": "${data.pdf_url}",
                "chat_history": "${data.chat_history}",
                "config": self.config_2k_context,
            },
        )
        self.pf.stream(run)  # wait for completion

        self.assertEqual(run.status, "Completed")

        details = self.pf.get_details(run)
        self.assertEqual(details.shape[0], 1)

    def test_bulk_run_mapping_missing_one_column(self):
        """Omitting a mapped column fails line runs but not the parent run."""
        run = self.create_chat_run(
            column_mapping={
                "question": "${data.question}",
                "pdf_url": "${data.pdf_url}",
            },
        )
        self.pf.stream(run)  # wait for completion

        # run won't be failed, only line runs inside it will be failed.
        self.assertEqual(run.status, "Completed")
        # TODO: get line run results when supported.

    def test_bulk_run_invalid_mapping(self):
        """A mapping referencing a nonexistent data column fails the run,
        surfaced as InvalidRunStatusError when streaming."""
        run = self.create_chat_run(
            column_mapping={
                "question": "${data.question_not_exist}",
                "pdf_url": "${data.pdf_url}",
                "chat_history": "${data.chat_history}",
            },
            stream=False,
        )

        with self.assertRaises(InvalidRunStatusError):
            self.pf.stream(run)  # wait for completion
# Allow running this test module directly: `python azure_chat_with_pdf_test.py`.
if __name__ == "__main__":
    unittest.main()
| promptflow/examples/flows/chat/chat-with-pdf/tests/azure_chat_with_pdf_test.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/tests/azure_chat_with_pdf_test.py",
"repo_id": "promptflow",
"token_count": 1653
} | 8 |
from promptflow import tool
import json
def get_current_weather(location, unit="fahrenheit"):
    """Return a canned current-weather report for *location* (mock tool)."""
    return {
        "location": location,
        "temperature": "72",
        "unit": unit,
        "forecast": ["sunny", "windy"],
    }
def get_n_day_weather_forecast(location, format, num_days):
    """Return a canned multi-day forecast for *location* (mock tool)."""
    forecast_report = {
        "location": location,
        "temperature": "60",
        "format": format,
        "forecast": ["rainy"],
        "num_days": num_days,
    }
    return forecast_report
@tool
def run_function(response_message: dict) -> str:
    """Execute the function call requested by an LLM response, if any.

    If the message contains a "function_call", dispatch to the matching local
    tool function with the JSON-decoded arguments; otherwise return the plain
    message content (or the message itself when it is not a dict).

    :param response_message: ChatML-style assistant message from the model.
    :return: The tool function's result, or the message content.
    :raises KeyError: If the requested function name is not a known tool.
    """
    if "function_call" in response_message:
        function_name = response_message["function_call"]["name"]
        function_args = json.loads(response_message["function_call"]["arguments"])
        print(function_args)
        # Dispatch only to the known tool functions. The original used
        # globals()[function_name], which would let a model-supplied name
        # invoke arbitrary module-level objects.
        known_functions = {
            "get_current_weather": get_current_weather,
            "get_n_day_weather_forecast": get_n_day_weather_forecast,
        }
        result = known_functions[function_name](**function_args)
    else:
        print("No function call")
        if isinstance(response_message, dict):
            result = response_message["content"]
        else:
            result = response_message
    return result
| promptflow/examples/flows/chat/use_functions_with_chat_models/run_function.py/0 | {
"file_path": "promptflow/examples/flows/chat/use_functions_with_chat_models/run_function.py",
"repo_id": "promptflow",
"token_count": 474
} | 9 |
from promptflow import tool
def string_to_number(raw_string: str) -> float:
    """Parse a prediction/groundtruth string into a float.

    Handles plain ints/floats and simple fractions ("a/b"); returns None for
    anything unparsable (e.g. 'the answer is \box{2/3}', '4/7//9') or a
    zero denominator.
    """
    # Fast path: the whole string is already a number.
    try:
        return float(raw_string)
    except Exception:
        pass
    # Fraction path: exactly one '/' separating two parsable numbers.
    parts = raw_string.split('/')
    if len(parts) != 2:
        return None
    numerator, denominator = parts
    try:
        return float(numerator) / float(denominator)
    except Exception:
        # Covers non-numeric parts and division by zero alike.
        return None
@tool
def line_process(groundtruth: str, prediction: str) -> int:
    """Score a math answer: 1 when prediction equals groundtruth numerically
    (to 10 decimal places), -1 when either string is not a parsable number
    or the values differ."""
    pred_value = string_to_number(prediction)
    # Early stop: prediction is not a parsable number.
    if pred_value is None:
        return -1
    gt_value = string_to_number(groundtruth)
    if gt_value is None:
        return -1
    # Both values parsed; compare with rounding to tolerate float noise.
    return 1 if round(pred_value, 10) == round(gt_value, 10) else -1
if __name__ == "__main__":
    # Manual smoke checks: equivalent fractions, fraction vs decimal,
    # unequal integers, and an unparsable prediction.
    sample_pairs = [
        ("3/5", "6/10"),
        ("1/2", "0.5"),
        ("3", "5"),
        ("2/3", "the answer is \box{2/3}"),
    ]
    for groundtruth, prediction in sample_pairs:
        print("The processed result is", line_process(groundtruth, prediction))
| promptflow/examples/flows/evaluation/eval-chat-math/line_process.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-chat-math/line_process.py",
"repo_id": "promptflow",
"token_count": 740
} | 10 |
# Groundedness Evaluation
This flow leverages an LLM to evaluate groundedness: whether the answer states only facts that are present in the given context.
Tools used in this flow:
- `python` tool
- built-in `llm` tool
### 0. Setup connection
Prepare your Azure Open AI resource follow this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>
```
### 1. Test flow/node
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
```
### 2. create flow run with multi line data
```bash
pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' answer='${data.answer}' context='${data.context}' --stream
```
You can also skip providing `column-mapping` if provided data has same column name as the flow.
Reference [here](https://aka.ms/pf/column-mapping) for default behavior when `column-mapping` not provided in CLI.
| promptflow/examples/flows/evaluation/eval-groundedness/README.md/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-groundedness/README.md",
"repo_id": "promptflow",
"token_count": 369
} | 11 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
python_requirements_txt: requirements.txt
inputs:
document_path:
type: string
default: ./document1.txt
language:
type: string
default: en
outputs:
extractive_summary:
type: string
reference: ${Extractive_Summarization.output}
abstractive_summary:
type: string
reference: ${Abstractive_Summarization.output}
sentiment:
type: string
reference: ${Sentiment_Analysis.output}
recognized_entities:
type: string
reference: ${Entity_Recognition.output}
nodes:
- name: Read_File
type: python
source:
type: code
path: read_file.py
inputs:
file_path: ${inputs.document_path}
- name: Translator
type: python
source:
type: package
tool: language_tools.tools.translator.get_translation
inputs:
connection: azure_ai_translator_connection
text: ${Read_File.output}
to:
- en
parse_response: true
- name: Parse_Translation
type: python
source:
type: code
path: parse_translation.py
inputs:
translation_results: ${Translator.output}
language: en
- name: PII_Entity_Recognition
type: python
source:
type: package
tool: language_tools.tools.pii_entity_recognition.get_pii_entity_recognition
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${Parse_Translation.output}
parse_response: true
categories:
- Address
- Age
- Date
- Email
- IPAddress
- PhoneNumber
- URL
- name: Abstractive_Summarization
type: python
source:
type: package
tool: language_tools.tools.abstractive_summarization.get_abstractive_summarization
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${PII_Entity_Recognition.output}
parse_response: true
query: quarterly results
summary_length: medium
- name: Sentiment_Analysis
type: python
source:
type: package
tool: language_tools.tools.sentiment_analysis.get_sentiment_analysis
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${Abstractive_Summarization.output}
parse_response: true
- name: Entity_Recognition
type: python
source:
type: package
tool: language_tools.tools.entity_recognition.get_entity_recognition
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${PII_Entity_Recognition.output}
parse_response: true
- name: Extractive_Summarization
type: python
source:
type: package
tool: language_tools.tools.extractive_summarization.get_extractive_summarization
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${PII_Entity_Recognition.output}
query: Cloud AI
parse_response: true
| promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 1037
} | 12 |
from promptflow import tool
@tool
def functions_format() -> list:
    """Return the OpenAI function-calling schema for the agent's actions.

    Three functions are exposed: Wikipedia search, a Python shell, and a
    terminal "finish" action. The multi-line description strings keep their
    source indentation; that whitespace is part of the prompt sent to the
    model.
    """
    functions = [
        {
            "name": "search",
            "description": """The action will search this entity name on Wikipedia and returns the first {count}
            sentences if it exists. If not, it will return some related entities to search next.""",
            "parameters": {
                "type": "object",
                "properties": {
                    "entity": {
                        "type": "string",
                        "description": "Entity name which is used for Wikipedia search.",
                    },
                    "count": {
                        "type": "integer",
                        "default": 10,
                        "description": "Returned sentences count if entity name exists Wikipedia.",
                    },
                },
                "required": ["entity"],
            },
        },
        {
            "name": "python",
            "description": """A Python shell. Use this to execute python commands. Input should be a valid python
            command and you should print result with `print(...)` to see the output.""",
            "parameters": {
                "type": "object",
                "properties": {
                    "command": {
                        "type": "string",
                        "description": "The command you want to execute in python",
                    }
                },
                "required": ["command"]
            },
        },
        {
            "name": "finish",
            "description": """use this to signal that you have finished all your goals and remember show your
            results""",
            "parameters": {
                "type": "object",
                "properties": {
                    "response": {
                        "type": "string",
                        "description": "final response to let people know you have finished your goals and remember "
                        "show your results",
                    },
                },
                "required": ["response"],
            },
        },
    ]
    return functions
| promptflow/examples/flows/standard/autonomous-agent/functions.py/0 | {
"file_path": "promptflow/examples/flows/standard/autonomous-agent/functions.py",
"repo_id": "promptflow",
"token_count": 1183
} | 13 |
from promptflow import tool
@tool
def product_recommendation(query: str) -> str:
    """Return a canned product recommendation for *query* (demo tool)."""
    print("Your query is {}.\nRecommending products...".format(query))
    return "I recommend promptflow to you, which can solve your problem very well."
| promptflow/examples/flows/standard/conditional-flow-for-switch/product_recommendation.py/0 | {
"file_path": "promptflow/examples/flows/standard/conditional-flow-for-switch/product_recommendation.py",
"repo_id": "promptflow",
"token_count": 67
} | 14 |
import io
from promptflow import tool
from promptflow.contracts.multimedia import Image
from PIL import Image as PIL_Image
@tool
def passthrough(input_image: Image) -> Image:
    """Mirror the input image left-to-right and return it as a PNG Image.

    (Despite the name, this does not pass the image through unchanged.)
    """
    source = PIL_Image.open(io.BytesIO(input_image))
    mirrored = source.transpose(PIL_Image.FLIP_LEFT_RIGHT)
    out_buffer = io.BytesIO()
    mirrored.save(out_buffer, format="PNG")
    return Image(out_buffer.getvalue(), mime_type="image/png")
| promptflow/examples/flows/standard/describe-image/flip_image.py/0 | {
"file_path": "promptflow/examples/flows/standard/describe-image/flip_image.py",
"repo_id": "promptflow",
"token_count": 165
} | 15 |
# Generate Python docstring
This example can help you automatically generate Python code's docstring and return the modified code.
Tools used in this flow:
- `load_code` tool, it can load code from a file path.
- Load content from a local file.
- Loading content from a remote URL, currently loading HTML content, not just code.
- `divide_code` tool, it can divide code into code blocks.
- To avoid files that are too long and exceed the token limit, it is necessary to split the file.
- Avoid using the same function (such as __init__(self)) to generate docstrings in the same one file, which may cause confusion when adding docstrings to the corresponding functions in the future.
- `generate_docstring` tool, it can generate docstring for a code block, and merge docstring into origin code.
## What you will learn
In this flow, you will learn
- How to compose an auto generate docstring flow.
- How to use different LLM APIs to request LLM, including synchronous/asynchronous APIs, chat/completion APIs.
- How to use asynchronous multiple coroutine approach to request LLM API.
- How to construct a prompt.
## Prerequisites
### Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
### Create connection for LLM to use
```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>
```
Note:
The [azure_openai.yml](../../../connections/azure_openai.yml) file is located in connections folder.
We are using connection named `open_ai_connection`in [flow.dag.yaml](flow.dag.yaml).
## Execute with Promptflow
### Execute with SDK
`python main.py --source <your_file_path>`
**Note**: the file path should be a python file path, default is `./azure_open_ai.py`.
A webpage will be generated, displaying diff:
![result](result.png)
### Execute with CLI
```bash
# run flow with default file path in flow.dag.yaml
pf flow test --flow .
# run flow with file path
pf flow test --flow . --inputs source="./azure_open_ai.py"
```
```bash
# run flow with batch data
pf run create --flow . --data ./data.jsonl --name auto_generate_docstring --column-mapping source='${data.source}'
```
Output the code after add the docstring.
You can also skip providing `column-mapping` if provided data has same column name as the flow.
Reference [here](https://aka.ms/pf/column-mapping) for default behavior when `column-mapping` not provided in CLI.
| promptflow/examples/flows/standard/gen-docstring/README.md/0 | {
"file_path": "promptflow/examples/flows/standard/gen-docstring/README.md",
"repo_id": "promptflow",
"token_count": 730
} | 16 |
from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
def generate_index_json(
    index_type: str,
    index: str = "",
    index_connection: CognitiveSearchConnection = "",
    index_name: str = "",
    content_field: str = "",
    embedding_field: str = "",
    metadata_field: str = "",
    semantic_configuration: str = "",
    embedding_connection: Union[AzureOpenAIConnection, OpenAIConnection] = "",
    embedding_deployment: str = ""
) -> str:
    """Dummy generator for the serialized index configuration JSON.

    Only "Azure Cognitive Search" and "Workspace MLIndex" are handled; the
    former echoes the UI inputs with a placeholder index, the latter returns
    placeholder "retrieved_*" values.
    """
    import json

    # NOTE(review): for any other index_type (e.g. "PineCone", "FAISS" from
    # list_index_types) this falls through and returns json.dumps("") — i.e.
    # the string '""'. Confirm whether that is intended for the dummy.
    inputs = ""
    if index_type == "Azure Cognitive Search":
        # 1. Call to create a new index
        # 2. Call to get the index yaml and return as a json
        inputs = {
            "index_type": index_type,
            "index": "retrieved_index",
            "index_connection": index_connection,
            "index_name": index_name,
            "content_field": content_field,
            "embedding_field": embedding_field,
            "metadata_field": metadata_field,
            "semantic_configuration": semantic_configuration,
            "embedding_connection": embedding_connection,
            "embedding_deployment": embedding_deployment
        }
    elif index_type == "Workspace MLIndex":
        # Call to get the index yaml and return as a json
        inputs = {
            "index_type": index_type,
            "index": index,
            "index_connection": "retrieved_index_connection",
            "index_name": "retrieved_index_name",
            "content_field": "retrieved_content_field",
            "embedding_field": "retrieved_embedding_field",
            "metadata_field": "retrieved_metadata_field",
            "semantic_configuration": "retrieved_semantic_configuration",
            "embedding_connection": "retrieved_embedding_connection",
            "embedding_deployment": "retrieved_embedding_deployment"
        }
    result = json.dumps(inputs)
    return result
def reverse_generate_index_json(index_json: str) -> Dict:
    """Recover the original UI input dict from a serialized index_json string."""
    import json

    return json.loads(index_json)
def list_index_types(subscription_id, resource_group_name, workspace_name) -> List[str]:
    """Return the supported index kinds as dropdown entries.

    The workspace arguments are accepted for interface compatibility but unused.
    """
    supported_kinds = (
        "Azure Cognitive Search",
        "PineCone",
        "FAISS",
        "Workspace MLIndex",
        "MLIndex from path",
    )
    return [{"value": kind} for kind in supported_kinds]
def list_indexes(
    subscription_id,
    resource_group_name,
    workspace_name
) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
    """Return ten dummy index entries built from randomly chosen fruit names.

    Each entry carries a value, a display label, a search hyperlink, and a
    positional description. The workspace arguments are unused.
    """
    import random

    fruit_names = ["apple", "banana", "cherry", "date", "elderberry", "fig", "grape", "honeydew", "kiwi", "lemon"]
    entries = []
    for position in range(10):
        label = f"{random.choice(fruit_names)}{position}"
        entries.append(
            {
                "value": label,
                "display_value": f"index_{label}",
                "hyperlink": f"https://www.bing.com/search?q={label}",
                "description": f"this is {position} item",
            }
        )
    return entries
def list_fields(subscription_id, resource_group_name, workspace_name) -> List[str]:
    """Return the hard-coded index field names as dropdown entries.

    The workspace arguments are accepted for interface compatibility but unused.
    """
    # Note: "catelog" is reproduced as-is; it is part of the served field list.
    field_names = (
        "id",
        "content",
        "catelog",
        "sourcepage",
        "sourcefile",
        "title",
        "content_hash",
        "meta_json_string",
        "content_vector_open_ai",
    )
    return [{"value": name} for name in field_names]
def list_semantic_configuration(subscription_id, resource_group_name, workspace_name) -> List[str]:
    """Return the single available semantic configuration as a dropdown entry."""
    configuration_names = ("azureml-default",)
    return [{"value": name} for name in configuration_names]
def list_embedding_deployment(embedding_connection: str) -> List[str]:
    """Return the embedding deployment names as dropdown entries.

    The connection argument is accepted for interface compatibility but unused.
    """
    deployment_names = ("text-embedding-ada-002", "ada-1k-tpm")
    return [{"value": name} for name in deployment_names]
@tool
def my_tool(index_json: str, queries: str, top_k: int) -> str:
    """Dummy tool entry point that greets with the provided index JSON.

    `queries` and `top_k` are accepted for interface completeness but unused.
    """
    return "Hello {}".format(index_json)
| promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_generated_by_input.py/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_generated_by_input.py",
"repo_id": "promptflow",
"token_count": 1712
} | 17 |
import pytest
import unittest
from my_tool_package.tools.tool_with_custom_strong_type_connection import MyCustomConnection, my_tool
@pytest.fixture
def my_custom_connection() -> MyCustomConnection:
    """Provide a MyCustomConnection populated with dummy credentials."""
    connection_settings = {
        "api_key": "my-api-key",
        "api_base": "my-api-base",
    }
    return MyCustomConnection(connection_settings)
class TestMyToolWithCustomStrongTypeConnection:
    """Unit tests for my_tool using a custom strong type connection fixture."""

    def test_my_tool(self, my_custom_connection):
        # The dummy tool simply greets the given input text.
        output = my_tool(my_custom_connection, input_text="Microsoft")
        assert output == "Hello Microsoft"
# Allow running this file directly. The test class above relies on a pytest
# fixture and does not subclass unittest.TestCase, so it must be collected by
# pytest — unittest.main() would discover no TestCase subclasses here and
# silently run zero tests.
if __name__ == "__main__":
    pytest.main([__file__])
| promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_custom_strong_type_connection.py/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_custom_strong_type_connection.py",
"repo_id": "promptflow",
"token_count": 257
} | 18 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
text:
type: string
default: Microsoft
outputs:
my_output:
type: string
reference: ${my_script_tool.output}
nodes:
- name: my_script_tool
type: python
source:
type: code
path: my_script_tool.py
inputs:
connection: normal_custom_connection
input_text: ${inputs.text}
| promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 157
} | 19 |
# Deploy flow as applications
This folder contains examples of how to build and deploy a flow as an application, such as a web application packaged in Docker format.
"file_path": "promptflow/examples/tutorials/flow-deploy/README.md",
"repo_id": "promptflow",
"token_count": 28
} | 20 |
---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
---
# Deploy flow using Kubernetes
This example demos how to deploy flow as a Kubernetes app.
We will use [web-classification](../../../flows/standard/web-classification/README.md) as example in this tutorial.
Please ensure that you have installed all the required dependencies. You can refer to the "Prerequisites" section in the README of the [web-classification](../../../flows/standard/web-classification/README.md#Prerequisites) for a comprehensive list of prerequisites and installation instructions.
## Build a flow as docker format
Note that all dependent connections must be created before building as docker.
```bash
# create connection if not created before
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as docker format app:
```bash
pf flow build --source ../../../flows/standard/web-classification --output dist --format docker
```
## Deploy with Kubernetes
### Build Docker image
Like other Dockerfile, you need to build the image first. You can tag the image with any name you want. In this example, we use `web-classification-serve`.
Then run the command below:
```shell
cd dist
docker build . -t web-classification-serve
```
### Create Kubernetes deployment yaml.
The Kubernetes deployment yaml file acts as a guide for managing your docker container in a Kubernetes pod. It clearly specifies important information like the container image, port configurations, environment variables, and various settings. Below, you'll find a simple deployment template that you can easily customize to meet your needs.
**Note**: You need to encode the secret using base64 first and use the <encoded_secret> as the value of 'open-ai-connection-api-key' in the deployment configuration. For example, you can run the command below on Linux:
```shell
encoded_secret=$(echo -n <your_api_key> | base64)
```
```yaml
---
kind: Namespace
apiVersion: v1
metadata:
name: web-classification
---
apiVersion: v1
kind: Secret
metadata:
name: open-ai-connection-api-key
namespace: web-classification
type: Opaque
data:
open-ai-connection-api-key: <encoded_secret>
---
apiVersion: v1
kind: Service
metadata:
name: web-classification-service
namespace: web-classification
spec:
type: NodePort
ports:
- name: http
port: 8080
targetPort: 8080
nodePort: 30123
selector:
app: web-classification-serve-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: web-classification-serve-app
namespace: web-classification
spec:
selector:
matchLabels:
app: web-classification-serve-app
template:
metadata:
labels:
app: web-classification-serve-app
spec:
containers:
- name: web-classification-serve-container
image: web-classification-serve
imagePullPolicy: Never
ports:
- containerPort: 8080
env:
- name: OPEN_AI_CONNECTION_API_KEY
valueFrom:
secretKeyRef:
name: open-ai-connection-api-key
key: open-ai-connection-api-key
```
### Apply the deployment.
Before you can deploy your application, ensure that you have set up a Kubernetes cluster and installed [kubectl](https://kubernetes.io/docs/reference/kubectl/) if it's not already installed. In this documentation, we will use [Minikube](https://minikube.sigs.k8s.io/docs/) as an example. To start the cluster, execute the following command:
```shell
minikube start
```
Once your Kubernetes cluster is up and running, you can proceed to deploy your application by using the following command:
```shell
kubectl apply -f deployment.yaml
```
This command will create the necessary pods to run your application within the cluster.
**Note**: You need replace <pod_name> below with your specific pod_name. You can retrieve it by running `kubectl get pods -n web-classification`.
### Retrieve flow service logs of the container
The kubectl logs command is used to retrieve the logs of a container running within a pod, which can be useful for debugging, monitoring, and troubleshooting applications deployed in a Kubernetes cluster.
```shell
kubectl -n web-classification logs <pod-name>
```
#### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in containers.
Secrets in connections won't be exported directly. Instead, we will export them as a reference to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
You'll need to set up the environment variables in the container to make the connections work.
### Test the endpoint
- Option1:
Once you've started the service, you can establish a connection between a local port and a port on the pod. This allows you to conveniently test the endpoint from your local terminal.
To achieve this, execute the following command:
```shell
kubectl port-forward <pod_name> 8080:8080 -n web-classification
```
With the port forwarding in place, you can use the curl command to initiate the endpoint test:
```shell
curl http://localhost:8080/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
- Option2:
`minikube service web-classification-service --url -n web-classification` runs as a process, creating a tunnel to the cluster. The command exposes the service directly to any program running on the host operating system.
The command above retrieves the URL of the service running within the Minikube Kubernetes cluster (e.g. http://<ip>:<assigned_port>), which you can open in your web browser to interact with the flow service. Alternatively, you can use the following command to test the endpoint:
**Note**: Minikube will use its own external port instead of nodePort to listen to the service. So please substitute <assigned_port> with the port obtained above.
```shell
curl http://localhost:<assigned_port>/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
| promptflow/examples/tutorials/flow-deploy/kubernetes/README.md/0 | {
"file_path": "promptflow/examples/tutorials/flow-deploy/kubernetes/README.md",
"repo_id": "promptflow",
"token_count": 1912
} | 21 |
name: release-env
channels:
- defaults
- conda-forge
dependencies:
- pip
- pip:
- setuptools
- twine==4.0.0
- portalocker~=1.2
- setuptools_rust
- pytest
- pytest-xdist
- pytest-sugar
- pytest-timeout
- azure-keyvault
- azure-identity
| promptflow/scripts/building/release-env.yml/0 | {
"file_path": "promptflow/scripts/building/release-env.yml",
"repo_id": "promptflow",
"token_count": 134
} | 22 |
promptflow.connections package
==============================
.. autoclass:: promptflow.connections.AzureContentSafetyConnection
:members:
:undoc-members:
:show-inheritance:
:noindex:
.. autoclass:: promptflow.connections.AzureOpenAIConnection
:members:
:undoc-members:
:show-inheritance:
:noindex:
.. autoclass:: promptflow.connections.CognitiveSearchConnection
:members:
:undoc-members:
:show-inheritance:
:noindex:
.. autoclass:: promptflow.connections.CustomConnection
:members:
:undoc-members:
:show-inheritance:
:noindex:
.. autoclass:: promptflow.connections.FormRecognizerConnection
:members:
:undoc-members:
:show-inheritance:
:noindex:
.. autoclass:: promptflow.connections.OpenAIConnection
:members:
:undoc-members:
:show-inheritance:
:noindex:
.. autoclass:: promptflow.connections.SerpConnection
:members:
:undoc-members:
:show-inheritance:
:noindex:
| promptflow/scripts/docs/promptflow.connections.rst/0 | {
"file_path": "promptflow/scripts/docs/promptflow.connections.rst",
"repo_id": "promptflow",
"token_count": 360
} | 23 |
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller spec file for the promptflow CLI. Running PyInstaller on this
# spec builds a `pfcli` executable and collects it, with its binaries and data
# files, into a `promptflow` distribution folder.
# NOTE(review): Analysis, PYZ, EXE and COLLECT are names injected by
# PyInstaller while executing the spec; they are not ordinary imports.
from PyInstaller.utils.hooks import collect_data_files
from PyInstaller.utils.hooks import copy_metadata

# (source, destination) pairs copied verbatim into the bundle: license/notice
# files, SDK executable assets, the built-in tool package, and the Windows
# launcher batch scripts.
datas = [('../resources/CLI_LICENSE.rtf', '.'), ('../../../../src/promptflow/NOTICE.txt', '.'),
         ('../../../../src/promptflow/promptflow/_sdk/data/executable/', './promptflow/_sdk/data/executable/'),
         ('../../../../src/promptflow-tools/promptflow/tools/', './promptflow/tools/'),
         ('./pf.bat', '.'), ('./pfs.bat', '.'), ('./pfazure.bat', '.'), ('./pfsvc.bat', '.')]
# Streamlit needs both its data files and its package metadata at runtime.
datas += collect_data_files('streamlit')
datas += copy_metadata('streamlit')
datas += collect_data_files('streamlit_quill')
datas += collect_data_files('promptflow')

# Modules PyInstaller's static analysis cannot discover on its own.
hidden_imports = ['streamlit.runtime.scriptrunner.magic_funcs', 'win32timezone', 'promptflow']

# No bytecode encryption.
block_cipher = None

# Analyze the entry script and collect its dependency graph.
pfcli_a = Analysis(
    ['pfcli.py'],
    pathex=[],
    binaries=[],
    datas=datas,
    hiddenimports=hidden_imports,
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

# Bundle the pure-Python modules into a compressed archive.
pfcli_pyz = PYZ(pfcli_a.pure, pfcli_a.zipped_data, cipher=block_cipher)

# Build the console executable (binaries excluded; COLLECT gathers them below).
pfcli_exe = EXE(
    pfcli_pyz,
    pfcli_a.scripts,
    [],
    exclude_binaries=True,
    name='pfcli',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    contents_directory='.',
    icon='../resources/logo32.ico',
    version="./version_info.txt",
)

# Assemble the one-folder distribution named 'promptflow'.
coll = COLLECT(
    pfcli_exe,
    pfcli_a.binaries,
    pfcli_a.zipfiles,
    pfcli_a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='promptflow',
)
| promptflow/scripts/installer/windows/scripts/promptflow.spec/0 | {
"file_path": "promptflow/scripts/installer/windows/scripts/promptflow.spec",
"repo_id": "promptflow",
"token_count": 788
} | 24 |
class Telemetry:
    """Empty placeholder telemetry object for the readme GitHub Actions driver."""
| promptflow/scripts/readme/ghactions_driver/telemetry_obj.py/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/telemetry_obj.py",
"repo_id": "promptflow",
"token_count": 12
} | 25 |
{% extends "workflow_skeleton.yml.jinja2" %}
{% block steps %}
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Generate config.json for canary workspace (scheduled runs only)
if: github.event_name == 'schedule'
run: echo '${{ '{{' }} secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ '{{' }} github.workspace }}/examples/config.json
- name: Generate config.json for production workspace
if: github.event_name != 'schedule'
run: echo '${{ '{{' }} secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ '{{' }} github.workspace }}/examples/config.json
- name: Azure Login
uses: azure/login@v1
with:
creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }}
- name: Setup Python 3.9 environment
uses: actions/setup-python@v4
with:
python-version: "3.9"
- name: Prepare requirements
run: |
python -m pip install --upgrade pip
pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt
pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt
- name: Create Aoai Connection
run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}"
- name: Test Notebook
working-directory: {{ gh_working_dir }}
run: |
papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb
- name: Upload artifact
if: ${{ '{{' }} always() }}
uses: actions/upload-artifact@v3
with:
name: artifact
path: {{ gh_working_dir }}
{% endblock steps %} | promptflow/scripts/readme/ghactions_driver/workflow_templates/workflow_config_json.yml.jinja2/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/workflow_templates/workflow_config_json.yml.jinja2",
"repo_id": "promptflow",
"token_count": 647
} | 26 |
import argparse
import ast
import importlib
import json
import os
import sys
from ruamel.yaml import YAML
sys.path.append("src/promptflow-tools")
sys.path.append(os.getcwd())
from utils.generate_tool_meta_utils import generate_custom_llm_tools_in_module_as_dict, generate_python_tools_in_module_as_dict # noqa: E402, E501
if __name__ == "__main__":
    # CLI that imports a tool module and dumps its generated tool meta as YAML.
    parser = argparse.ArgumentParser(description="Generate meta for a tool.")
    parser.add_argument("--module", "-m", help="Module to generate tools.", type=str, required=True)
    parser.add_argument("--output", "-o", help="Path to the output tool json file.", required=True)
    parser.add_argument(
        "--tool-type",
        "-t",
        help="Provide tool type: 'python' or 'custom_llm'. By default, 'python' will be set as the tool type.",
        type=str,
        choices=["python", "custom_llm"],
        default="python",
    )
    parser.add_argument(
        "--name",
        "-n",
        help="Provide a custom name for the tool. By default, the function name will be used as the tool name.",
        type=str,
    )
    parser.add_argument("--description", "-d", help="Provide a brief description of the tool.", type=str)
    parser.add_argument(
        "--icon",
        "-i",
        type=str,
        help="your tool's icon image path, if you need to show different icons in dark and light mode, \n"
             "please use `icon-light` and `icon-dark` parameters. \n"
             "If these icon parameters are not provided, the system will use the default icon.",
        required=False)
    parser.add_argument(
        "--icon-light",
        type=str,
        help="your tool's icon image path for light mode, \n"
             "if you need to show the same icon in dark and light mode, please use `icon` parameter. \n"
             "If these icon parameters are not provided, the system will use the default icon.",
        required=False)
    parser.add_argument(
        "--icon-dark",
        type=str,
        help="your tool's icon image path for dark mode, \n"
             "if you need to show the same icon in dark and light mode, please use `icon` parameter. \n"
             "If these icon parameters are not provided, the system will use the default icon.",
        required=False)
    parser.add_argument(
        "--category",
        "-c",
        type=str,
        help="your tool's category, if not provided, the tool will be displayed under the root folder.",
        required=False)
    parser.add_argument(
        "--tags",
        type=ast.literal_eval,
        help="your tool's tags. It should be a dictionary-like string, e.g.: --tags \"{'tag1':'v1','tag2':'v2'}\".",
        required=False)
    args = parser.parse_args()

    m = importlib.import_module(args.module)

    # Resolve the icon arguments into either a single data-URL string (--icon)
    # or a {"light": ..., "dark": ...} dict (--icon-light / --icon-dark).
    # `--icon` is mutually exclusive with the mode-specific options.
    # (The previous version repeated the local import three times and carried a
    # dead `isinstance(icon, dict)` branch in the light-mode path, where `icon`
    # was always still the empty string.)
    icon = ""
    if args.icon:
        if args.icon_light or args.icon_dark:
            raise ValueError("You cannot provide both `icon` and `icon-light` or `icon-dark`.")
        from convert_image_to_data_url import check_image_type_and_generate_data_url  # noqa: E402
        icon = check_image_type_and_generate_data_url(args.icon)
    elif args.icon_light or args.icon_dark:
        from convert_image_to_data_url import check_image_type_and_generate_data_url  # noqa: E402
        icon = {}
        if args.icon_light:
            icon["light"] = check_image_type_and_generate_data_url(args.icon_light)
        if args.icon_dark:
            icon["dark"] = check_image_type_and_generate_data_url(args.icon_dark)

    if args.tool_type == "custom_llm":
        tools_dict = generate_custom_llm_tools_in_module_as_dict(
            m,
            name=args.name,
            description=args.description,
            icon=icon,
            category=args.category,
            tags=args.tags)
    else:
        tools_dict = generate_python_tools_in_module_as_dict(
            m,
            name=args.name,
            description=args.description,
            icon=icon,
            category=args.category,
            tags=args.tags)

    # The generated dict cannot be dumped as yaml directly since yaml cannot handle string enum.
    tools_dict = json.loads(json.dumps(tools_dict))
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.indent(mapping=2, sequence=4, offset=2)
    with open(args.output, "w") as f:
        yaml.dump(tools_dict, f)
    print(f"Tools meta generated to '{args.output}'.")
| promptflow/scripts/tool/generate_package_tool_meta.py/0 | {
"file_path": "promptflow/scripts/tool/generate_package_tool_meta.py",
"repo_id": "promptflow",
"token_count": 2007
} | 27 |
import json
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
import requests
scripts_dir = os.path.join(os.getcwd(), "scripts")
index_url = "https://azuremlsdktestpypi.azureedge.net/test-promptflow/promptflow-tools"
ado_promptflow_repo_url_format = "https://{0}@dev.azure.com/msdata/Vienna/_git/PromptFlow"
def replace_lines_from_file_under_hint(file_path, hint: str, lines_to_replace: list):
    """Replace the lines immediately following every *hint* line in a file.

    Each line whose stripped content equals *hint* has the next
    ``len(lines_to_replace)`` lines overwritten with *lines_to_replace*.
    If the hint is absent, the hint and the replacement lines are appended
    to the end of the file instead. The file is rewritten in place.
    """
    replacement_count = len(lines_to_replace)
    with open(file_path, "r") as source:
        content = source.readlines()
    hint_found = False
    for idx in range(len(content)):
        if content[idx].strip() == hint:
            hint_found = True
            content[idx + 1 : idx + 1 + replacement_count] = lines_to_replace
    if not hint_found:
        content.append(hint + "\n")
        content.extend(lines_to_replace)
    with open(file_path, "w") as target:
        target.writelines(content)
def create_remote_branch_in_ADO_with_new_tool_pkg_version(
    ado_pat: str, tool_pkg_version: str, blob_prefix="test-promptflow"
) -> str:
    """Clone the internal ADO PromptFlow repo, pin the given tool package
    version (and copy in the test-endpoint config) on a fresh branch, and
    push that branch to the remote.

    :param ado_pat: Azure DevOps personal access token, embedded in the clone URL.
    :param tool_pkg_version: promptflow_tools version to pin in the image requirements.
    :param blob_prefix: blob container prefix used to build the extra pip index URL.
    :return: name of the newly pushed remote branch.
    """
    # Clone the Azure DevOps repo
    parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
    tmp_dir = os.path.join(parent_dir, "temp")
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    # Git identity is required for the commit created below.
    subprocess.run(["git", "config", "--global", "user.email", "[email protected]"])
    subprocess.run(["git", "config", "--global", "user.name", "github-promptflow"])
    # Change directory to the 'tmp' directory
    os.chdir(tmp_dir)
    repo_dir = os.path.join(tmp_dir, "PromptFlow")
    repo_url = ado_promptflow_repo_url_format.format(ado_pat)
    subprocess.run(["git", "clone", repo_url, repo_dir])
    # Change directory to the repo directory
    os.chdir(repo_dir)
    # Pull the devs/test branch
    # Discard any local state left from a previous run, then sync main.
    subprocess.run(["git", "reset", "."])
    subprocess.run(["git", "checkout", "."])
    subprocess.run(["git", "clean", "-f", "."])
    subprocess.run(["git", "checkout", "main"])
    subprocess.run(["git", "fetch"])
    subprocess.run(["git", "pull"])
    # Make changes
    # 1. add test endpoint 'promptflow-gallery-tool-test.yaml'
    # 2. update tool package version
    source_file = Path(scripts_dir) / "tool/utils/configs/promptflow-gallery-tool-test.yaml"
    destination_folder = "deploy/model"
    shutil.copy(source_file, destination_folder)
    new_lines = [
        f"--extra-index-url https://azuremlsdktestpypi.azureedge.net/{blob_prefix}\n",
        f"promptflow_tools=={tool_pkg_version}\n",
    ]
    # Pin the package version under the marker hint in the image requirements file.
    replace_lines_from_file_under_hint(
        file_path="docker_build/linux/extra_requirements.txt",
        hint="# Prompt-flow tool package",
        lines_to_replace=new_lines,
    )
    # Create a new remote branch
    # Timestamp suffix keeps branch names unique across runs.
    new_branch_name = f"devs/test_tool_pkg_{tool_pkg_version}_{datetime.now().strftime('%Y%m%d%H%M%S')}"
    # NOTE(review): this passes both "origin" and the new branch name to
    # `git branch -D`; presumably intended to delete a stale local branch —
    # confirm the extra "origin" argument is deliberate.
    subprocess.run(["git", "branch", "-D", "origin", new_branch_name])
    subprocess.run(["git", "checkout", "-b", new_branch_name])
    subprocess.run(["git", "add", "."])
    subprocess.run(["git", "commit", "-m", f"Update tool package version to {tool_pkg_version}"])
    subprocess.run(["git", "push", "-u", repo_url, new_branch_name])
    return new_branch_name
def deploy_test_endpoint(branch_name: str, ado_pat: str):
    """Trigger the ADO 'PromptFlow-deploy-endpoint' pipeline for *branch_name*.

    :param branch_name: remote branch (created by
        create_remote_branch_in_ADO_with_new_tool_pkg_version) to deploy from.
    :param ado_pat: Azure DevOps personal access token used for basic auth.
    """
    # PromptFlow-deploy-endpoint pipeline in ADO: https://msdata.visualstudio.com/Vienna/_build?definitionId=24767&_a=summary # noqa: E501
    url = "https://dev.azure.com/msdata/Vienna/_apis/pipelines/24767/runs?api-version=7.0-preview.1"
    request_body_file = Path(scripts_dir) / "tool/utils/configs/deploy-endpoint-request-body.json"
    with open(request_body_file, "r") as f:
        body = json.load(f)
    # Point the pipeline's self-repository checkout at the freshly pushed branch.
    body["resources"]["repositories"]["self"]["refName"] = f"refs/heads/{branch_name}"
    print(f"request body: {body}")
    # ADO accepts a PAT as the basic-auth password; the username is ignored.
    response = requests.post(url, json=body, auth=("dummy_user_name", ado_pat))
    print(response.status_code)
    print(response.content)
| promptflow/scripts/tool/utils/repo_utils.py/0 | {
"file_path": "promptflow/scripts/tool/utils/repo_utils.py",
"repo_id": "promptflow",
"token_count": 1625
} | 28 |