text | id | metadata | __index_level_0__
---|---|---|---|
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
chat_history:
type: list
is_chat_history: true
default: []
question:
type: string
is_chat_input: true
outputs:
answer:
type: string
reference: ${chat.output}
is_chat_output: true
nodes:
- name: chat
type: llm
source:
type: code
path: chat.jinja2
inputs:
deployment_name: {{ deployment }}
max_tokens: '256'
temperature: '0.7'
chat_history: ${inputs.chat_history}
question: ${inputs.question}
api: chat
connection: {{ connection }}
environment:
python_requirements_txt: requirements.txt
| promptflow/src/promptflow/promptflow/_cli/data/chat_flow/template/flow.dag.yaml.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/chat_flow/template/flow.dag.yaml.jinja2",
"repo_id": "promptflow",
"token_count": 257
} | 8 |
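The template above carries two Jinja2 placeholders, `{{ deployment }}` and `{{ connection }}`, presumably filled in by the `pf` CLI when scaffolding a chat flow. A minimal rendering sketch, assuming jinja2 is installed and the template file is available locally; the deployment and connection values are hypothetical:
```python
from pathlib import Path
from jinja2 import Template

# Render the chat-flow DAG template with placeholder values (illustrative only).
template_text = Path("flow.dag.yaml.jinja2").read_text()
rendered = Template(template_text).render(
    deployment="gpt-35-turbo",        # hypothetical deployment name
    connection="open_ai_connection",  # hypothetical connection name
)
Path("flow.dag.yaml").write_text(rendered)
```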
import yaml
import logging
import tempfile
import hashlib
from pathlib import Path
logger = logging.getLogger(__name__)
package_name = "{{ package_name }}"
def list_package_tools(raise_error=False):
"""
List the metadata of all tools in the package.
The keys of the returned dict are the tools' module names and the values are each tool's metadata.
"""
# This function is auto generated by pf CLI, please do not modify manually.
tools = {}
meta_cache_file = Path(__file__).parent / "yamls" / "tools_meta.yaml"
if meta_cache_file.exists():
logger.debug(f"List tools meta from cache file {meta_cache_file.as_posix()}.")
# Get tool meta from cache file.
with open(meta_cache_file, "r") as f:
tools = yaml.safe_load(f)
else:
from promptflow import PFClient
pf_client = PFClient()
tools = pf_client.tools._list_tools_in_package(package_name, raise_error=raise_error)
return tools
| promptflow/src/promptflow/promptflow/_cli/data/package_tool/utils.py.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/package_tool/utils.py.jinja2",
"repo_id": "promptflow",
"token_count": 361
} | 9 |
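A minimal usage sketch for the generated helper above, assuming the package it is generated into is importable: the bundled `yamls/tools_meta.yaml` cache is read when present, otherwise `PFClient` discovery is used. The loop body is illustrative only.
```python
# Hypothetical usage; the printed fields depend on each tool's metadata.
tools = list_package_tools(raise_error=True)
for tool_module, meta in tools.items():
    print(tool_module, meta)
```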
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import asyncio
import functools
import importlib
import logging
import os
from importlib.metadata import version
import openai
from promptflow._core.operation_context import OperationContext
from promptflow.contracts.trace import TraceType
from .tracer import _traced_async, _traced_sync
USER_AGENT_HEADER = "x-ms-useragent"
PROMPTFLOW_PREFIX = "ms-azure-ai-promptflow-"
IS_LEGACY_OPENAI = version("openai").startswith("0.")
def inject_function_async(args_to_ignore=None, trace_type=TraceType.LLM):
def decorator(func):
return _traced_async(func, args_to_ignore=args_to_ignore, trace_type=trace_type)
return decorator
def inject_function_sync(args_to_ignore=None, trace_type=TraceType.LLM):
def decorator(func):
return _traced_sync(func, args_to_ignore=args_to_ignore, trace_type=trace_type)
return decorator
def get_aoai_telemetry_headers() -> dict:
"""Get the http headers for AOAI request.
Headers whose names start with "ms-azure-ai-" or "x-ms-" are used to track the request in AOAI. The
values in this dict will be recorded as telemetry, so please do not put any sensitive information in it.
Returns:
A dictionary of http headers.
"""
# get promptflow info from operation context
operation_context = OperationContext.get_instance()
tracking_info = operation_context._get_tracking_info()
tracking_info = {k.replace("_", "-"): v for k, v in tracking_info.items()}
def is_primitive(value):
return value is None or isinstance(value, (int, float, str, bool))
# Ensure that the telemetry info is primitive
tracking_info = {k: v for k, v in tracking_info.items() if is_primitive(v)}
# init headers
headers = {USER_AGENT_HEADER: operation_context.get_user_agent()}
# update header with promptflow info
headers.update({f"{PROMPTFLOW_PREFIX}{k}": str(v) if v is not None else "" for k, v in tracking_info.items()})
return headers
def inject_operation_headers(f):
def inject_headers(kwargs):
# Inject headers from operation context, overwrite injected header with headers from kwargs.
injected_headers = get_aoai_telemetry_headers()
original_headers = kwargs.get("headers" if IS_LEGACY_OPENAI else "extra_headers")
if original_headers and isinstance(original_headers, dict):
injected_headers.update(original_headers)
kwargs["headers" if IS_LEGACY_OPENAI else "extra_headers"] = injected_headers
if asyncio.iscoroutinefunction(f):
@functools.wraps(f)
async def wrapper(*args, **kwargs):
inject_headers(kwargs)
return await f(*args, **kwargs)
else:
@functools.wraps(f)
def wrapper(*args, **kwargs):
inject_headers(kwargs)
return f(*args, **kwargs)
return wrapper
def inject_async(f):
wrapper_fun = inject_operation_headers((inject_function_async(["api_key", "headers", "extra_headers"])(f)))
wrapper_fun._original = f
return wrapper_fun
def inject_sync(f):
wrapper_fun = inject_operation_headers((inject_function_sync(["api_key", "headers", "extra_headers"])(f)))
wrapper_fun._original = f
return wrapper_fun
def _openai_api_list():
if IS_LEGACY_OPENAI:
sync_apis = (
("openai", "Completion", "create"),
("openai", "ChatCompletion", "create"),
("openai", "Embedding", "create"),
)
async_apis = (
("openai", "Completion", "acreate"),
("openai", "ChatCompletion", "acreate"),
("openai", "Embedding", "acreate"),
)
else:
sync_apis = (
("openai.resources.chat", "Completions", "create"),
("openai.resources", "Completions", "create"),
("openai.resources", "Embeddings", "create"),
)
async_apis = (
("openai.resources.chat", "AsyncCompletions", "create"),
("openai.resources", "AsyncCompletions", "create"),
("openai.resources", "AsyncEmbeddings", "create"),
)
yield sync_apis, inject_sync
yield async_apis, inject_async
def _generate_api_and_injector(apis):
for apis, injector in apis:
for module_name, class_name, method_name in apis:
try:
module = importlib.import_module(module_name)
api = getattr(module, class_name)
if hasattr(api, method_name):
yield api, method_name, injector
except AttributeError as e:
# Log the attribute exception with the missing class information
logging.warning(
f"AttributeError: The module '{module_name}' does not have the class '{class_name}'. {str(e)}"
)
except Exception as e:
# Log other exceptions as a warning, as we're not sure what they might be
logging.warning(f"An unexpected error occurred: {str(e)}")
def available_openai_apis_and_injectors():
"""
Generates a sequence of tuples containing OpenAI API classes, method names, and
corresponding injector functions based on whether the legacy OpenAI interface is used.
This function handles the discrepancy reported in https://github.com/openai/openai-python/issues/996,
where async interfaces were not recognized as coroutines. It ensures that decorators
are applied correctly to both synchronous and asynchronous methods.
Yields:
Tuples of (api_class, method_name, injector_function)
"""
yield from _generate_api_and_injector(_openai_api_list())
def inject_openai_api():
"""This function:
1. Modifies the create methods of the OpenAI API classes to inject logic before calling the original methods.
It stores the original methods as _original attributes of the create methods.
2. Updates the openai api configs from environment variables.
"""
for api, method, injector in available_openai_apis_and_injectors():
# Check if the create method of the openai_api class has already been modified
if not hasattr(getattr(api, method), "_original"):
setattr(api, method, injector(getattr(api, method)))
if IS_LEGACY_OPENAI:
# For the openai versions lower than 1.0.0, it reads api configs from environment variables only at
# import time. So we need to update the openai api configs from environment variables here.
# Please refer to this issue: https://github.com/openai/openai-python/issues/557.
# The issue has been fixed in openai>=1.0.0.
openai.api_key = os.environ.get("OPENAI_API_KEY", openai.api_key)
openai.api_key_path = os.environ.get("OPENAI_API_KEY_PATH", openai.api_key_path)
openai.organization = os.environ.get("OPENAI_ORGANIZATION", openai.organization)
openai.api_base = os.environ.get("OPENAI_API_BASE", openai.api_base)
openai.api_type = os.environ.get("OPENAI_API_TYPE", openai.api_type)
openai.api_version = os.environ.get("OPENAI_API_VERSION", openai.api_version)
def recover_openai_api():
"""This function restores the original create methods of the OpenAI API classes
by assigning them back from the _original attributes of the modified methods.
"""
for api, method, _ in available_openai_apis_and_injectors():
if hasattr(getattr(api, method), "_original"):
setattr(api, method, getattr(getattr(api, method), "_original"))
| promptflow/src/promptflow/promptflow/_core/openai_injector.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/openai_injector.py",
"repo_id": "promptflow",
"token_count": 2929
} | 10 |
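A sketch of how the injector above is typically applied, assuming a non-legacy `openai>=1.0` client and an `OPENAI_API_KEY` in the environment; the model name is a placeholder, and this is an illustration rather than the library's own entry point. After `inject_openai_api()`, create calls carry the promptflow telemetry headers automatically.
```python
from openai import OpenAI

inject_openai_api()  # patch the create methods listed above
client = OpenAI()
response = client.chat.completions.create(
    model="gpt-35-turbo",  # placeholder model/deployment name
    messages=[{"role": "user", "content": "hello"}],
)
print(response.choices[0].message.content)
recover_openai_api()  # restore the original, un-patched methods
```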
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
from promptflow._sdk._orm.run_info import RunInfo
from promptflow._sdk._orm.orchestrator import Orchestrator
from promptflow._sdk._orm.experiment_node_run import ExperimentNodeRun
from .connection import Connection
from .experiment import Experiment
from .session import mgmt_db_session
__all__ = [
"RunInfo",
"Connection",
"Experiment",
"ExperimentNodeRun",
"Orchestrator",
"mgmt_db_session",
]
| promptflow/src/promptflow/promptflow/_sdk/_orm/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_orm/__init__.py",
"repo_id": "promptflow",
"token_count": 198
} | 11 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import inspect
from pathlib import Path
from flask import jsonify, request
import promptflow._sdk.schemas._connection as connection
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._service import Namespace, Resource, fields
from promptflow._sdk._service.utils.utils import build_pfs_user_agent, local_user_only, make_response_no_content
from promptflow._sdk.entities._connection import _Connection
api = Namespace("Connections", description="Connections Management")
# azure connection
def validate_working_directory(value):
if value is None:
return
if not isinstance(value, str):
value = str(value)
if not Path(value).is_dir():
raise ValueError("Invalid working directory.")
return value
working_directory_parser = api.parser()
working_directory_parser.add_argument(
"working_directory", type=validate_working_directory, location="args", required=False
)
# Response model of list connections
list_connection_field = api.model(
"Connection",
{
"name": fields.String,
"type": fields.String,
"module": fields.String,
"expiry_time": fields.String,
"created_date": fields.String,
"last_modified_date": fields.String,
},
)
# Response model of connection operation
dict_field = api.schema_model("ConnectionDict", {"additionalProperties": True, "type": "object"})
# Response model of connection spec
connection_config_spec_model = api.model(
"ConnectionConfigSpec",
{
"name": fields.String,
"optional": fields.Boolean,
"default": fields.String,
},
)
connection_spec_model = api.model(
"ConnectionSpec",
{
"connection_type": fields.String,
"config_spec": fields.List(fields.Nested(connection_config_spec_model)),
},
)
def _get_connection_operation(working_directory=None):
from promptflow._sdk._pf_client import PFClient
connection_provider = Configuration().get_connection_provider(path=working_directory)
# get_connection_operation is a shared function, so we build user agent based on request first and
# then pass it to the function
connection_operation = PFClient(
connection_provider=connection_provider, user_agent=build_pfs_user_agent()
).connections
return connection_operation
@api.route("/")
class ConnectionList(Resource):
@api.doc(parser=working_directory_parser, description="List all connections")
@api.marshal_with(list_connection_field, skip_none=True, as_list=True)
@local_user_only
@api.response(
code=403, description="This service is available for local user only, please specify X-Remote-User in headers."
)
def get(self):
args = working_directory_parser.parse_args()
connection_op = _get_connection_operation(args.working_directory)
# parse query parameters
max_results = request.args.get("max_results", default=50, type=int)
all_results = request.args.get("all_results", default=False, type=bool)
connections = connection_op.list(max_results=max_results, all_results=all_results)
connections_dict = [connection._to_dict() for connection in connections]
return connections_dict
@api.route("/<string:name>")
@api.param("name", "The connection name.")
class Connection(Resource):
@api.doc(parser=working_directory_parser, description="Get connection")
@api.response(code=200, description="Connection details", model=dict_field)
@local_user_only
@api.response(
code=403, description="This service is available for local user only, please specify X-Remote-User in headers."
)
def get(self, name: str):
args = working_directory_parser.parse_args()
connection_op = _get_connection_operation(args.working_directory)
connection = connection_op.get(name=name, raise_error=True)
connection_dict = connection._to_dict()
return jsonify(connection_dict)
@api.doc(body=dict_field, description="Create connection")
@api.response(code=200, description="Connection details", model=dict_field)
@local_user_only
@api.response(
code=403, description="This service is available for local user only, please specify X-Remote-User in headers."
)
def post(self, name: str):
connection_op = _get_connection_operation()
connection_data = request.get_json(force=True)
connection_data["name"] = name
connection = _Connection._load(data=connection_data)
connection = connection_op.create_or_update(connection)
return jsonify(connection._to_dict())
@api.doc(body=dict_field, description="Update connection")
@api.response(code=200, description="Connection details", model=dict_field)
@local_user_only
@api.response(
code=403, description="This service is available for local user only, please specify X-Remote-User in headers."
)
def put(self, name: str):
connection_op = _get_connection_operation()
connection_dict = request.get_json(force=True)
params_override = [{k: v} for k, v in connection_dict.items()]
# TODO: check if we need to record registry for this private operation
existing_connection = connection_op._get(name)
connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
connection._secrets = existing_connection._secrets
connection = connection_op.create_or_update(connection)
return jsonify(connection._to_dict())
@api.doc(description="Delete connection")
@local_user_only
@api.response(code=204, description="Delete connection", model=dict_field)
@api.response(
code=403, description="This service is available for local user only, please specify X-Remote-User in headers."
)
def delete(self, name: str):
connection_op = _get_connection_operation()
connection_op.delete(name=name)
return make_response_no_content()
@api.route("/<string:name>/listsecrets")
class ConnectionWithSecret(Resource):
@api.doc(parser=working_directory_parser, description="Get connection with secret")
@api.response(code=200, description="Connection details with secret", model=dict_field)
@local_user_only
@api.response(
code=403, description="This service is available for local user only, please specify X-Remote-User in headers."
)
def get(self, name: str):
args = working_directory_parser.parse_args()
connection_op = _get_connection_operation(args.working_directory)
connection = connection_op.get(name=name, with_secrets=True, raise_error=True)
connection_dict = connection._to_dict()
return jsonify(connection_dict)
@api.route("/specs")
class ConnectionSpecs(Resource):
@api.doc(description="List connection spec")
@api.response(code=200, description="List connection spec", skip_none=True, model=connection_spec_model)
def get(self):
hide_connection_fields = ["module"]
connection_specs = []
for name, obj in inspect.getmembers(connection):
if (
inspect.isclass(obj)
and issubclass(obj, connection.ConnectionSchema)
and not isinstance(obj, connection.ConnectionSchema)
):
config_specs = []
for field_name, field in obj._declared_fields.items():
if not field.dump_only and field_name not in hide_connection_fields:
configs = {"name": field_name, "optional": field.allow_none}
if field.default:
configs["default"] = field.default
if field_name == "type":
configs["default"] = field.allowed_values[0]
config_specs.append(configs)
connection_spec = {
"connection_type": name.replace("Schema", ""),
"config_specs": config_specs,
}
connection_specs.append(connection_spec)
return jsonify(connection_specs)
| promptflow/src/promptflow/promptflow/_sdk/_service/apis/connection.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_service/apis/connection.py",
"repo_id": "promptflow",
"token_count": 3034
} | 12 |
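A hypothetical client-side sketch for the endpoints defined above. The host, port, and URL prefix below are assumptions (substitute the actual prompt flow service address and the prefix for the Connections namespace); the `X-Remote-User` header is needed because the resources are guarded by `@local_user_only`.
```python
import getpass
import requests

base = "http://localhost:8080/Connections"  # assumed host, port, and prefix
headers = {"X-Remote-User": getpass.getuser()}

print(requests.get(f"{base}/", headers=headers).json())                     # list connections
print(requests.get(f"{base}/my_conn", headers=headers).json())              # 'my_conn' is a placeholder name
print(requests.get(f"{base}/my_conn/listsecrets", headers=headers).json())  # include secrets
```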
# ---------------------------------------------------------
# Copyright (c) 2013-2022 Caleb P. Burns credits dahlia <https://github.com/dahlia>
# Licensed under the MPLv2 License. See License.txt in the project root for
# license information.
# ---------------------------------------------------------
"""
This file code has been vendored from pathspec repo.
Please do not edit it, unless really necessary
"""
import dataclasses
import os
import posixpath
import re
import warnings
from typing import Any, AnyStr, Iterable, Iterator
from typing import Match as MatchHint
from typing import Optional
from typing import Pattern as PatternHint
from typing import Tuple, Union
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
# The encoding to use when parsing a byte string pattern.
# This provides the base definition for patterns.
_BYTES_ENCODING = "latin1"
class Pattern(object):
"""
The :class:`Pattern` class is the abstract definition of a pattern.
"""
# Make the class dict-less.
__slots__ = ("include",)
def __init__(self, include: Optional[bool]) -> None:
"""
Initializes the :class:`Pattern` instance.
*include* (:class:`bool` or :data:`None`) is whether the matched
files should be included (:data:`True`), excluded (:data:`False`),
or is a null-operation (:data:`None`).
"""
self.include = include
"""
*include* (:class:`bool` or :data:`None`) is whether the matched
files should be included (:data:`True`), excluded (:data:`False`),
or is a null-operation (:data:`None`).
"""
def match(self, files: Iterable[str]) -> Iterator[str]:
"""
DEPRECATED: This method is no longer used and has been replaced by
:meth:`.match_file`. Use the :meth:`.match_file` method with a loop
for similar results.
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`)
contains each file relative to the root directory (e.g.,
:data:`"relative/path/to/file"`).
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`).
"""
warnings.warn(
(
"{0.__module__}.{0.__qualname__}.match() is deprecated. Use "
"{0.__module__}.{0.__qualname__}.match_file() with a loop for "
"similar results."
).format(self.__class__),
DeprecationWarning,
stacklevel=2,
)
for file in files:
if self.match_file(file) is not None:
yield file
def match_file(self, file: str) -> Optional[Any]:
"""
Matches this pattern against the specified file.
*file* (:class:`str`) is the normalized file path to match against.
Returns the match result if *file* matched; otherwise, :data:`None`.
"""
raise NotImplementedError(
("{0.__module__}.{0.__qualname__} must override match_file().").format(self.__class__)
)
class RegexPattern(Pattern):
"""
The :class:`RegexPattern` class is an implementation of a pattern
using regular expressions.
"""
# Keep the class dict-less.
__slots__ = ("regex",)
def __init__(
self,
pattern: Union[AnyStr, PatternHint],
include: Optional[bool] = None,
) -> None:
"""
Initializes the :class:`RegexPattern` instance.
*pattern* (:class:`str`, :class:`bytes`, :class:`re.Pattern`, or
:data:`None`) is the pattern to compile into a regular expression.
*include* (:class:`bool` or :data:`None`) must be :data:`None`
unless *pattern* is a precompiled regular expression (:class:`re.Pattern`)
in which case it is whether matched files should be included
(:data:`True`), excluded (:data:`False`), or is a null operation
(:data:`None`).
.. NOTE:: Subclasses do not need to support the *include*
parameter.
"""
if isinstance(pattern, (str, bytes)):
assert include is None, ("include:{!r} must be null when pattern:{!r} is a string.").format(
include, pattern
)
regex, include = self.pattern_to_regex(pattern)
# NOTE: Make sure to allow a null regular expression to be
# returned for a null-operation.
if include is not None:
regex = re.compile(regex)
elif pattern is not None and hasattr(pattern, "match"):
# Assume pattern is a precompiled regular expression.
# - NOTE: Used specified *include*.
regex = pattern
elif pattern is None:
# NOTE: Make sure to allow a null pattern to be passed for a
# null-operation.
assert include is None, ("include:{!r} must be null when pattern:{!r} is null.").format(include, pattern)
else:
raise TypeError("pattern:{!r} is not a string, re.Pattern, or None.".format(pattern))
super(RegexPattern, self).__init__(include)
self.regex: PatternHint = regex
"""
*regex* (:class:`re.Pattern`) is the regular expression for the
pattern.
"""
def __eq__(self, other: "RegexPattern") -> bool:
"""
Tests the equality of this regex pattern with *other* (:class:`RegexPattern`)
by comparing their :attr:`~Pattern.include` and :attr:`~RegexPattern.regex`
attributes.
"""
if isinstance(other, RegexPattern):
return self.include == other.include and self.regex == other.regex
return NotImplemented
def match_file(self, file: str) -> Optional["RegexMatchResult"]:
"""
Matches this pattern against the specified file.
*file* (:class:`str`)
contains each file relative to the root directory (e.g., "relative/path/to/file").
Returns the match result (:class:`RegexMatchResult`) if *file*
matched; otherwise, :data:`None`.
"""
if self.include is not None:
match = self.regex.match(file)
if match is not None:
return RegexMatchResult(match)
return None
@classmethod
def pattern_to_regex(cls, pattern: str) -> Tuple[str, bool]:
"""
Convert the pattern into an un-compiled regular expression.
*pattern* (:class:`str`) is the pattern to convert into a regular
expression.
Returns the un-compiled regular expression (:class:`str` or :data:`None`),
and whether matched files should be included (:data:`True`),
excluded (:data:`False`), or is a null-operation (:data:`None`).
.. NOTE:: The default implementation simply returns *pattern* and
:data:`True`.
"""
return pattern, True
@dataclasses.dataclass()
class RegexMatchResult(object):
"""
The :class:`RegexMatchResult` data class is used to return information
about the matched regular expression.
"""
# Keep the class dict-less.
__slots__ = ("match",)
match: MatchHint
"""
*match* (:class:`re.Match`) is the regex match result.
"""
class GitWildMatchPatternError(ValueError):
"""
The :class:`GitWildMatchPatternError` indicates an invalid git wild match
pattern.
"""
class GitWildMatchPattern(RegexPattern):
"""
The :class:`GitWildMatchPattern` class represents a compiled Git
wildmatch pattern.
"""
# Keep the dict-less class hierarchy.
__slots__ = ()
@classmethod
# pylint: disable=too-many-branches,too-many-statements
def pattern_to_regex(
cls,
pattern: AnyStr,
) -> Tuple[Optional[AnyStr], Optional[bool]]:
"""
Convert the pattern into a regular expression.
*pattern* (:class:`str` or :class:`bytes`) is the pattern to convert
into a regular expression.
Returns the un-compiled regular expression (:class:`str`, :class:`bytes`,
or :data:`None`); and whether matched files should be included
(:data:`True`), excluded (:data:`False`), or if it is a
null-operation (:data:`None`).
"""
if isinstance(pattern, str):
return_type = str
elif isinstance(pattern, bytes):
return_type = bytes
pattern = pattern.decode(_BYTES_ENCODING)
else:
raise TypeError(f"pattern:{pattern!r} is not a unicode or byte string.")
original_pattern = pattern
pattern = pattern.strip()
if pattern.startswith("#"):
# A pattern starting with a hash ('#') serves as a comment
# (neither includes nor excludes files). Escape the hash with a
# back-slash to match a literal hash (i.e., '\#').
regex = None
include = None
elif pattern == "/":
# EDGE CASE: According to `git check-ignore` (v2.4.1), a single
# '/' does not match any file.
regex = None
include = None
elif pattern:
if pattern.startswith("!"):
# A pattern starting with an exclamation mark ('!') negates the
# pattern (exclude instead of include). Escape the exclamation
# mark with a back-slash to match a literal exclamation mark
# (i.e., '\!').
include = False
# Remove leading exclamation mark.
pattern = pattern[1:]
else:
include = True
# Allow a regex override for edge cases that cannot be handled
# through normalization.
override_regex = None
# Split pattern into segments.
pattern_segments = pattern.split("/")
# Normalize pattern to make processing easier.
# EDGE CASE: Deal with duplicate double-asterisk sequences.
# Collapse each sequence down to one double-asterisk. Iterate over
# the segments in reverse and remove the duplicate double
# asterisks as we go.
for i in range(len(pattern_segments) - 1, 0, -1):
prev = pattern_segments[i - 1]
seg = pattern_segments[i]
if prev == "**" and seg == "**":
del pattern_segments[i]
if len(pattern_segments) == 2 and pattern_segments[0] == "**" and not pattern_segments[1]:
# EDGE CASE: The '**/' pattern should match everything except
# individual files in the root directory. This case cannot be
# adequately handled through normalization. Use the override.
override_regex = "^.+(?P<ps_d>/).*$"
if not pattern_segments[0]:
# A pattern beginning with a slash ('/') will only match paths
# directly on the root directory instead of any descendant
# paths. So, remove empty first segment to make pattern relative
# to root.
del pattern_segments[0]
elif len(pattern_segments) == 1 or (len(pattern_segments) == 2 and not pattern_segments[1]):
# A single pattern without a beginning slash ('/') will match
# any descendant path. This is equivalent to "**/{pattern}". So,
# prepend with double-asterisks to make pattern relative to
# root.
# EDGE CASE: This also holds for a single pattern with a
# trailing slash (e.g. dir/).
if pattern_segments[0] != "**":
pattern_segments.insert(0, "**")
else:
# EDGE CASE: A pattern without a beginning slash ('/') but
# contains at least one prepended directory (e.g.
# "dir/{pattern}") should not match "**/dir/{pattern}",
# according to `git check-ignore` (v2.4.1).
pass
if not pattern_segments:
# After resolving the edge cases, we end up with no pattern at
# all. This must be because the pattern is invalid.
raise GitWildMatchPatternError(f"Invalid git pattern: {original_pattern!r}")
if not pattern_segments[-1] and len(pattern_segments) > 1:
# A pattern ending with a slash ('/') will match all descendant
# paths if it is a directory but not if it is a regular file.
# This is equivalent to "{pattern}/**". So, set last segment to
# a double-asterisk to include all descendants.
pattern_segments[-1] = "**"
if override_regex is None:
# Build regular expression from pattern.
output = ["^"]
need_slash = False
end = len(pattern_segments) - 1
for i, seg in enumerate(pattern_segments):
if seg == "**":
if i == 0 and i == end:
# A pattern consisting solely of double-asterisks ('**')
# will match every path.
output.append(".+")
elif i == 0:
# A normalized pattern beginning with double-asterisks
# ('**') will match any leading path segments.
output.append("(?:.+/)?")
need_slash = False
elif i == end:
# A normalized pattern ending with double-asterisks ('**')
# will match any trailing path segments.
output.append("(?P<ps_d>/).*")
else:
# A pattern with inner double-asterisks ('**') will match
# multiple (or zero) inner path segments.
output.append("(?:/.+)?")
need_slash = True
elif seg == "*":
# Match single path segment.
if need_slash:
output.append("/")
output.append("[^/]+")
if i == end:
# A pattern ending without a slash ('/') will match a file
# or a directory (with paths underneath it). E.g., "foo"
# matches "foo", "foo/bar", "foo/bar/baz", etc.
output.append("(?:(?P<ps_d>/).*)?")
need_slash = True
else:
# Match segment glob pattern.
if need_slash:
output.append("/")
try:
output.append(cls._translate_segment_glob(seg))
except ValueError as e:
raise GitWildMatchPatternError(f"Invalid git pattern: {original_pattern!r}") from e
if i == end:
# A pattern ending without a slash ('/') will match a file
# or a directory (with paths underneath it). E.g., "foo"
# matches "foo", "foo/bar", "foo/bar/baz", etc.
output.append("(?:(?P<ps_d>/).*)?")
need_slash = True
output.append("$")
regex = "".join(output)
else:
# Use regex override.
regex = override_regex
else:
# A blank pattern is a null-operation (neither includes nor
# excludes files).
regex = None
include = None
if regex is not None and return_type is bytes:
regex = regex.encode(_BYTES_ENCODING)
return regex, include
@staticmethod
def _translate_segment_glob(pattern: str) -> str:
"""
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
"""
# NOTE: This is derived from `fnmatch.translate()` and is similar to
# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
escape = False
regex = ""
i, end = 0, len(pattern)
while i < end:
# Get next character.
char = pattern[i]
i += 1
if escape:
# Escape the character.
escape = False
regex += re.escape(char)
elif char == "\\":
# Escape character, escape next character.
escape = True
elif char == "*":
# Multi-character wildcard. Match any string (except slashes),
# including an empty string.
regex += "[^/]*"
elif char == "?":
# Single-character wildcard. Match any single character (except
# a slash).
regex += "[^/]"
elif char == "[":
# Bracket expression wildcard. Except for the beginning
# exclamation mark, the whole bracket expression can be used
# directly as regex but we have to find where the expression
# ends.
# - "[][!]" matches ']', '[' and '!'.
# - "[]-]" matches ']' and '-'.
# - "[!]a-]" matches any character except ']', 'a' and '-'.
j = i
# Pass back expression negation.
if j < end and pattern[j] == "!":
j += 1
# Pass first closing bracket if it is at the beginning of the
# expression.
if j < end and pattern[j] == "]":
j += 1
# Find closing bracket. Stop once we reach the end or find it.
while j < end and pattern[j] != "]":
j += 1
if j < end:
# Found end of bracket expression. Increment j to be one past
# the closing bracket:
#
# [...]
# ^ ^
# i j
#
j += 1
expr = "["
if pattern[i] == "!":
# Bracket expression needs to be negated.
expr += "^"
i += 1
elif pattern[i] == "^":
# POSIX declares that the regex bracket expression negation
# "[^...]" is undefined in a glob pattern. Python's
# `fnmatch.translate()` escapes the caret ('^') as a
# literal. To maintain consistency with undefined behavior,
# I am escaping the '^' as well.
expr += "\\^"
i += 1
# Build regex bracket expression. Escape slashes so they are
# treated as literal slashes by regex as defined by POSIX.
expr += pattern[i:j].replace("\\", "\\\\")
# Add regex bracket expression to regex result.
regex += expr
# Set i to one past the closing bracket.
i = j
else:
# Failed to find closing bracket, treat opening bracket as a
# bracket literal instead of as an expression.
regex += "\\["
else:
# Regular character, escape it for regex.
regex += re.escape(char)
if escape:
raise ValueError(f"Escape character found with no next character to escape: {pattern!r}")
return regex
@staticmethod
def escape(s: AnyStr) -> AnyStr:
"""
Escape special characters in the given string.
*s* (:class:`str` or :class:`bytes`) a filename or a string that you
want to escape, usually before adding it to a ".gitignore".
Returns the escaped string (:class:`str` or :class:`bytes`).
"""
if isinstance(s, str):
return_type = str
string = s
elif isinstance(s, bytes):
return_type = bytes
string = s.decode(_BYTES_ENCODING)
else:
raise TypeError(f"s:{s!r} is not a unicode or byte string.")
# Reference: https://git-scm.com/docs/gitignore#_pattern_format
meta_characters = r"[]!*#?"
out_string = "".join("\\" + x if x in meta_characters else x for x in string)
if return_type is bytes:
return out_string.encode(_BYTES_ENCODING)
return out_string
def normalize_file(file, separators=None):
# type: (Union[Text, PathLike], Optional[Collection[Text]]) -> Text
"""
Normalizes the file path to use the POSIX path separator (i.e.,
``'/'``), and make the paths relative (remove leading ``'/'``).
*file* (:class:`str` or :class:`pathlib.PurePath`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
# Convert path object to string.
norm_file = str(file)
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
if norm_file.startswith("/"):
# Make path relative.
norm_file = norm_file[1:]
elif norm_file.startswith("./"):
# Remove current directory prefix.
norm_file = norm_file[2:]
return norm_file
| promptflow/src/promptflow/promptflow/_sdk/_vendor/_pathspec.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_vendor/_pathspec.py",
"repo_id": "promptflow",
"token_count": 10455
} | 13 |
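A minimal sketch using the vendored classes above: normalize a path, then test it against a gitignore-style pattern.
```python
pattern = GitWildMatchPattern("*.log")
for path in ["logs/run.log", "src/main.py"]:
    norm = normalize_file(path)
    print(norm, pattern.match_file(norm) is not None)
# logs/run.log True
# src/main.py False
```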
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=protected-access
import json
import typing
from marshmallow import Schema, ValidationError
from promptflow._utils.logger_utils import LoggerFactory
from .core import MutableValidationResult, ValidationResultBuilder
module_logger = LoggerFactory.get_logger(__name__)
class SchemaValidatableMixin:
"""The mixin class for schema validation."""
@classmethod
def _create_empty_validation_result(cls) -> MutableValidationResult:
"""Simply create an empty validation result
To reduce _ValidationResultBuilder importing, which is a private class.
:return: An empty validation result
:rtype: MutableValidationResult
"""
return ValidationResultBuilder.success()
@classmethod
def _load_with_schema(cls, data, *, context, raise_original_exception=False, **kwargs):
schema = cls._create_schema_for_validation(context=context)
try:
return schema.load(data, **kwargs)
except ValidationError as e:
if raise_original_exception:
raise e
msg = "Trying to load data with schema failed. Data:\n%s\nError: %s" % (
json.dumps(data, indent=4) if isinstance(data, dict) else data,
json.dumps(e.messages, indent=4),
)
raise cls._create_validation_error(
message=msg,
no_personal_data_message=str(e),
) from e
@classmethod
# pylint: disable-next=docstring-missing-param
def _create_schema_for_validation(cls, context) -> Schema:
"""Create a schema of the resource with specific context. Should be overridden by subclass.
:return: The schema of the resource.
:rtype: Schema.
"""
raise NotImplementedError()
def _default_context(self) -> dict:
"""Get the default context for schema validation. Should be overridden by subclass.
:return: The default context for schema validation
:rtype: dict
"""
raise NotImplementedError()
@property
def _schema_for_validation(self) -> Schema:
"""Return the schema of this Resource with default context. Do not override this method.
Override _create_schema_for_validation instead.
:return: The schema of the resource.
:rtype: Schema.
"""
return self._create_schema_for_validation(context=self._default_context())
def _dump_for_validation(self) -> typing.Dict:
"""Convert the resource to a dictionary.
:return: Converted dictionary
:rtype: typing.Dict
"""
return self._schema_for_validation.dump(self)
@classmethod
def _create_validation_error(cls, message: str, no_personal_data_message: str) -> Exception:
"""The function to create the validation exception to raise in _try_raise and _validate when
raise_error is True.
Should be overridden by subclass.
:param message: The error message containing detailed information
:type message: str
:param no_personal_data_message: The error message without personal data
:type no_personal_data_message: str
:return: The validation exception to raise
:rtype: Exception
"""
raise NotImplementedError()
@classmethod
def _try_raise(
cls, validation_result: MutableValidationResult, *, raise_error: bool = True
) -> MutableValidationResult:
return validation_result.try_raise(raise_error=raise_error, error_func=cls._create_validation_error)
def _validate(self, raise_error=False) -> MutableValidationResult:
"""Validate the resource. If raise_error is True, raise ValidationError if validation fails and log warnings if
applicable; Else, return the validation result.
:param raise_error: Whether to raise ValidationError if validation fails.
:type raise_error: bool
:return: The validation result
:rtype: MutableValidationResult
"""
result = self.__schema_validate()
result.merge_with(self._customized_validate())
return self._try_raise(result, raise_error=raise_error)
def _customized_validate(self) -> MutableValidationResult:
"""Validate the resource with customized logic.
Override this method to add customized validation logic.
:return: The customized validation result
:rtype: MutableValidationResult
"""
return self._create_empty_validation_result()
@classmethod
def _get_skip_fields_in_schema_validation(
cls,
) -> typing.List[str]:
"""Get the fields that should be skipped in schema validation.
Override this method to add customized validation logic.
:return: The fields to skip in schema validation
:rtype: typing.List[str]
"""
return []
def __schema_validate(self) -> MutableValidationResult:
"""Validate the resource with the schema.
:return: The validation result
:rtype: MutableValidationResult
"""
data = self._dump_for_validation()
messages = self._schema_for_validation.validate(data)
for skip_field in self._get_skip_fields_in_schema_validation():
if skip_field in messages:
del messages[skip_field]
return ValidationResultBuilder.from_validation_messages(messages, data=data)
| promptflow/src/promptflow/promptflow/_sdk/entities/_validation/schema.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_validation/schema.py",
"repo_id": "promptflow",
"token_count": 2108
} | 14 |
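A hypothetical subclass sketch showing the hooks a resource overrides to plug into the mixin above; `MyResourceSchema` and the context contents are assumptions, not part of the source.
```python
class MyResource(SchemaValidatableMixin):
    @classmethod
    def _create_schema_for_validation(cls, context) -> Schema:
        return MyResourceSchema(context=context)  # assumed marshmallow schema

    def _default_context(self) -> dict:
        return {"base_path": "."}  # assumed context contents

    @classmethod
    def _create_validation_error(cls, message: str, no_personal_data_message: str) -> Exception:
        return ValueError(no_personal_data_message)

# result = MyResource()._validate(raise_error=False)  # collect errors without raising
```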
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import typing
from pathlib import Path
from marshmallow import fields
from marshmallow.exceptions import FieldInstanceResolutionError, ValidationError
from marshmallow.fields import _T, Field, Nested
from marshmallow.utils import RAISE, resolve_field_instance
from promptflow._sdk._constants import BASE_PATH_CONTEXT_KEY
from promptflow._sdk.schemas._base import PathAwareSchema
from promptflow._utils.logger_utils import LoggerFactory
# pylint: disable=unused-argument,no-self-use,protected-access
module_logger = LoggerFactory.get_logger(__name__)
class StringTransformedEnum(Field):
def __init__(self, **kwargs):
# pop marshmallow unknown args to avoid warnings
self.allowed_values = kwargs.pop("allowed_values", None)
self.casing_transform = kwargs.pop("casing_transform", lambda x: x.lower())
self.pass_original = kwargs.pop("pass_original", False)
super().__init__(**kwargs)
if isinstance(self.allowed_values, str):
self.allowed_values = [self.allowed_values]
self.allowed_values = [self.casing_transform(x) for x in self.allowed_values]
def _jsonschema_type_mapping(self):
schema = {"type": "string", "enum": self.allowed_values}
if self.name is not None:
schema["title"] = self.name
if self.dump_only:
schema["readonly"] = True
return schema
def _serialize(self, value, attr, obj, **kwargs):
if not value:
return
if isinstance(value, str) and self.casing_transform(value) in self.allowed_values:
return value if self.pass_original else self.casing_transform(value)
raise ValidationError(f"Value {value!r} passed is not in set {self.allowed_values}")
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, str) and self.casing_transform(value) in self.allowed_values:
return value if self.pass_original else self.casing_transform(value)
raise ValidationError(f"Value {value!r} passed is not in set {self.allowed_values}")
class LocalPathField(fields.Str):
"""A field that validates that the input is a local path.
Can only be used as fields of PathAwareSchema.
"""
default_error_messages = {
"invalid_path": "The filename, directory name, or volume label syntax is incorrect.",
"path_not_exist": "Can't find {allow_type} in resolved absolute path: {path}.",
}
def __init__(self, allow_dir=True, allow_file=True, **kwargs):
self._allow_dir = allow_dir
self._allow_file = allow_file
self._pattern = kwargs.get("pattern", None)
super().__init__(**kwargs)
def _resolve_path(self, value) -> Path:
"""Resolve path to absolute path based on base_path in context.
Will resolve the path if it's already an absolute path.
"""
try:
result = Path(value)
base_path = Path(self.context[BASE_PATH_CONTEXT_KEY])
if not result.is_absolute():
result = base_path / result
# for non-path string like "azureml:/xxx", OSError can be raised in either
# resolve() or is_dir() or is_file()
result = result.resolve()
if (self._allow_dir and result.is_dir()) or (self._allow_file and result.is_file()):
return result
except OSError:
raise self.make_error("invalid_path")
raise self.make_error("path_not_exist", path=result.as_posix(), allow_type=self.allowed_path_type)
@property
def allowed_path_type(self) -> str:
if self._allow_dir and self._allow_file:
return "directory or file"
if self._allow_dir:
return "directory"
return "file"
def _validate(self, value):
# inherited validations like required, allow_none, etc.
super(LocalPathField, self)._validate(value)
if value is None:
return
self._resolve_path(value)
def _serialize(self, value, attr, obj, **kwargs) -> typing.Optional[str]:
# do not block serializing None even if required or not allow_none.
if value is None:
return None
# always dump path as absolute path in string as base_path will be dropped after serialization
return super(LocalPathField, self)._serialize(self._resolve_path(value).as_posix(), attr, obj, **kwargs)
def _deserialize(self, value, attr, data, **kwargs):
# resolve to absolute path
if value is None:
return None
return super()._deserialize(self._resolve_path(value).as_posix(), attr, data, **kwargs)
# Note: there is a known ordering issue: the order in which the union fields are supplied can cause failures.
# For example, the first line below works, but the second one fails upon calling load_from_dict
# with the error "AttributeError: 'list' object has no attribute 'get'".
# inputs = UnionField([fields.List(NestedField(DataSchema)), NestedField(DataSchema)])
# inputs = UnionField([NestedField(DataSchema), fields.List(NestedField(DataSchema))])
class UnionField(fields.Field):
def __init__(self, union_fields: typing.List[fields.Field], is_strict=False, **kwargs):
super().__init__(**kwargs)
try:
# add the validation and make sure union_fields must be subclasses or instances of
# marshmallow.base.FieldABC
self._union_fields = [resolve_field_instance(cls_or_instance) for cls_or_instance in union_fields]
# TODO: make serialization/de-serialization work in the same way as json schema when is_strict is True
self.is_strict = is_strict  # When True, combine fields with oneOf instead of anyOf at schema generation
except FieldInstanceResolutionError as error:
raise ValueError(
'Elements of "union_fields" must be subclasses or ' "instances of marshmallow.base.FieldABC."
) from error
@property
def union_fields(self):
return iter(self._union_fields)
def insert_union_field(self, field):
self._union_fields.insert(0, field)
# This sets the parent for the schema and also handles nesting.
def _bind_to_schema(self, field_name, schema):
super()._bind_to_schema(field_name, schema)
self._union_fields = self._create_bind_fields(self._union_fields, field_name)
def _create_bind_fields(self, _fields, field_name):
new_union_fields = []
for field in _fields:
field = copy.deepcopy(field)
field._bind_to_schema(field_name, self)
new_union_fields.append(field)
return new_union_fields
def _serialize(self, value, attr, obj, **kwargs):
if value is None:
return None
errors = []
for field in self._union_fields:
try:
return field._serialize(value, attr, obj, **kwargs)
except ValidationError as e:
errors.extend(e.messages)
except (TypeError, ValueError, AttributeError) as e:
errors.extend([str(e)])
raise ValidationError(message=errors, field_name=attr)
def _deserialize(self, value, attr, data, **kwargs):
errors = []
for schema in self._union_fields:
try:
return schema.deserialize(value, attr, data, **kwargs)
except ValidationError as e:
errors.append(e.normalized_messages())
except (FileNotFoundError, TypeError) as e:
errors.append([str(e)])
finally:
# Revert base path to original path when job schema fail to deserialize job. For example, when load
# parallel job with component file reference starting with FILE prefix, maybe first CommandSchema will
# load component yaml according to AnonymousCommandComponentSchema, and YamlFileSchema will update base
# path. When CommandSchema fail to load, then Parallelschema will load component yaml according to
# AnonymousParallelComponentSchema, but base path now is incorrect, and will raise path not found error
# when load component yaml file.
if (
hasattr(schema, "name")
and schema.name == "jobs"
and hasattr(schema, "schema")
and isinstance(schema.schema, PathAwareSchema)
):
# use old base path to recover original base path
schema.schema.context[BASE_PATH_CONTEXT_KEY] = schema.schema.old_base_path
# recover base path of parent schema
schema.context[BASE_PATH_CONTEXT_KEY] = schema.schema.context[BASE_PATH_CONTEXT_KEY]
raise ValidationError(errors, field_name=attr)
class NestedField(Nested):
"""anticipates the default coming in next marshmallow version, unknown=True."""
def __init__(self, *args, **kwargs):
if kwargs.get("unknown") is None:
kwargs["unknown"] = RAISE
super().__init__(*args, **kwargs)
class DumpableIntegerField(fields.Integer):
"""An int field that cannot serialize other type of values to int if self.strict."""
def _serialize(self, value, attr, obj, **kwargs) -> typing.Optional[typing.Union[str, _T]]:
if self.strict and not isinstance(value, int):
# this implementation can serialize bool to bool
raise self.make_error("invalid", input=value)
return super()._serialize(value, attr, obj, **kwargs)
class DumpableFloatField(fields.Float):
"""A float field that cannot serialize other type of values to float if self.strict."""
def __init__(
self,
*,
strict: bool = False,
allow_nan: bool = False,
as_string: bool = False,
**kwargs,
):
self.strict = strict
super().__init__(allow_nan=allow_nan, as_string=as_string, **kwargs)
def _validated(self, value):
if self.strict and not isinstance(value, float):
raise self.make_error("invalid", input=value)
return super()._validated(value)
def _serialize(self, value, attr, obj, **kwargs) -> typing.Optional[typing.Union[str, _T]]:
return super()._serialize(self._validated(value), attr, obj, **kwargs)
def PrimitiveValueField(**kwargs):
"""Function to return a union field for primitive value.
:return: The primitive value field
:rtype: Field
"""
return UnionField(
[
# Note: order matters here - to make sure value parsed correctly.
# By default, when strict is false, marshmallow downcasts float to int.
# Setting it to true will throw a validation error when loading a float to int.
# https://github.com/marshmallow-code/marshmallow/pull/755
# Use DumpableIntegerField to make sure there will be validation error when
# loading/dumping a float to int.
# note that this field can serialize bool instance but cannot deserialize bool instance.
DumpableIntegerField(strict=True),
# Use DumpableFloatField with strict=True to avoid '1' (str) being serialized to 1.0 (float)
DumpableFloatField(strict=True),
# put string schema after Int and Float to make sure they won't dump to string
fields.Str(),
# fields.Bool comes last since it'll parse anything non-falsy to True
fields.Bool(),
],
**kwargs,
)
| promptflow/src/promptflow/promptflow/_sdk/schemas/_fields.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/schemas/_fields.py",
"repo_id": "promptflow",
"token_count": 4708
} | 15 |
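A hypothetical schema sketch using the fields defined above; `ParamSchema` and its field names are illustrative. `PrimitiveValueField` accepts int, float, str, or bool while rejecting lossy coercions, and `StringTransformedEnum` lower-cases the incoming value before checking it against `allowed_values`.
```python
from marshmallow import Schema

class ParamSchema(Schema):
    mode = StringTransformedEnum(allowed_values=["Fast", "Full"])
    value = PrimitiveValueField()

print(ParamSchema().load({"mode": "FAST", "value": 3.5}))
# {'mode': 'fast', 'value': 3.5}
```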
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import re
from typing import Any, Dict, Mapping
from promptflow._constants import LINE_NUMBER_KEY
from promptflow._utils.logger_utils import LoggerFactory
from promptflow.batch._errors import InputMappingError
logger = LoggerFactory.get_logger(name=__name__)
def apply_inputs_mapping(
inputs: Mapping[str, Mapping[str, Any]],
inputs_mapping: Mapping[str, str],
) -> Dict[str, Any]:
"""Apply input mapping to inputs for new contract.
.. admonition:: Examples
.. code-block:: python
inputs: {
"data": {"answer": "I'm fine, thank you.", "question": "How are you?"},
"baseline": {"answer": "The weather is good."},
}
inputs_mapping: {
"question": "${data.question}",
"groundtruth": "${data.answer}",
"baseline": "${baseline.answer}",
"deployment_name": "literal_value",
}
Returns: {
"question": "How are you?",
"groundtruth": "I'm fine, thank you."
"baseline": "The weather is good.",
"deployment_name": "literal_value",
}
:param inputs: A mapping of input keys to their corresponding values.
:type inputs: Mapping[str, Mapping[str, Any]]
:param inputs_mapping: A mapping of input keys to their corresponding mapping expressions.
:type inputs_mapping: Mapping[str, str]
:return: A dictionary of input keys to their corresponding mapped values.
:rtype: Dict[str, Any]
:raises InputMappingError: If any of the input mapping relations are not found in the inputs.
"""
result = {}
notfound_mapping_relations = []
for map_to_key, map_value in inputs_mapping.items():
# Ignore reserved key configuration from input mapping.
if map_to_key == LINE_NUMBER_KEY:
continue
if not isinstance(map_value, str): # All non-string values are literal values.
result[map_to_key] = map_value
continue
match = re.search(r"^\${([^{}]+)}$", map_value)
if match is not None:
pattern = match.group(1)
# Could also try each pair of key value from inputs to match the pattern.
# But split pattern by '.' is one deterministic way.
# So, give key with less '.' higher priority.
splitted_str = pattern.split(".")
find_match = False
for i in range(1, len(splitted_str)):
key = ".".join(splitted_str[:i])
source = ".".join(splitted_str[i:])
if key in inputs and source in inputs[key]:
find_match = True
result[map_to_key] = inputs[key][source]
break
if not find_match:
notfound_mapping_relations.append(map_value)
else:
result[map_to_key] = map_value # Literal value
# Return all not found mapping relations in one exception to provide better debug experience.
if notfound_mapping_relations:
invalid_relations = ", ".join(notfound_mapping_relations)
raise InputMappingError(
message_format=(
"The input for batch run is incorrect. Couldn't find these mapping relations: {invalid_relations}. "
"Please make sure your input mapping keys and values match your YAML input section and input data. "
"For more information, refer to the following documentation: https://aka.ms/pf/column-mapping"
),
invalid_relations=invalid_relations,
)
# For PRS scenario, apply_inputs_mapping will be used for exec_line and line_number is not necessary.
if LINE_NUMBER_KEY in inputs:
result[LINE_NUMBER_KEY] = inputs[LINE_NUMBER_KEY]
return result
| promptflow/src/promptflow/promptflow/_utils/inputs_mapping_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/inputs_mapping_utils.py",
"repo_id": "promptflow",
"token_count": 1662
} | 16 |
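A runnable sketch of the mapping contract documented in the docstring above:
```python
inputs = {
    "data": {"answer": "I'm fine, thank you.", "question": "How are you?"},
    "baseline": {"answer": "The weather is good."},
}
inputs_mapping = {
    "question": "${data.question}",
    "groundtruth": "${data.answer}",
    "baseline": "${baseline.answer}",
    "deployment_name": "literal_value",
}
print(apply_inputs_mapping(inputs, inputs_mapping))
# {'question': 'How are you?', 'groundtruth': "I'm fine, thank you.",
#  'baseline': 'The weather is good.', 'deployment_name': 'literal_value'}
```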
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
RESOURCE_FOLDER = Path(__file__).parent.parent / "resources"
COMMAND_COMPONENT_SPEC_TEMPLATE = RESOURCE_FOLDER / "component_spec_template.yaml"
DEFAULT_PYTHON_VERSION = "3.9"
| promptflow/src/promptflow/promptflow/azure/_constants/_component.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_constants/_component.py",
"repo_id": "promptflow",
"token_count": 102
} | 17 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import AzureMachineLearningDesignerServiceClientConfiguration
from .operations import BulkRunsOperations, ConnectionOperations, ConnectionsOperations, FlowRuntimesOperations, FlowRuntimesWorkspaceIndependentOperations, FlowSessionsOperations, FlowsOperations, FlowsProviderOperations, ToolsOperations, TraceSessionsOperations
class AzureMachineLearningDesignerServiceClient:
"""AzureMachineLearningDesignerServiceClient.
:ivar bulk_runs: BulkRunsOperations operations
:vartype bulk_runs: flow.aio.operations.BulkRunsOperations
:ivar connection: ConnectionOperations operations
:vartype connection: flow.aio.operations.ConnectionOperations
:ivar connections: ConnectionsOperations operations
:vartype connections: flow.aio.operations.ConnectionsOperations
:ivar flow_runtimes: FlowRuntimesOperations operations
:vartype flow_runtimes: flow.aio.operations.FlowRuntimesOperations
:ivar flow_runtimes_workspace_independent: FlowRuntimesWorkspaceIndependentOperations
operations
:vartype flow_runtimes_workspace_independent:
flow.aio.operations.FlowRuntimesWorkspaceIndependentOperations
:ivar flows: FlowsOperations operations
:vartype flows: flow.aio.operations.FlowsOperations
:ivar flow_sessions: FlowSessionsOperations operations
:vartype flow_sessions: flow.aio.operations.FlowSessionsOperations
:ivar flows_provider: FlowsProviderOperations operations
:vartype flows_provider: flow.aio.operations.FlowsProviderOperations
:ivar tools: ToolsOperations operations
:vartype tools: flow.aio.operations.ToolsOperations
:ivar trace_sessions: TraceSessionsOperations operations
:vartype trace_sessions: flow.aio.operations.TraceSessionsOperations
:param base_url: Service URL. Default value is ''.
:type base_url: str
:param api_version: Api Version. The default value is "1.0.0".
:type api_version: str
"""
def __init__(
self,
base_url: str = "",
api_version: Optional[str] = "1.0.0",
**kwargs: Any
) -> None:
self._config = AzureMachineLearningDesignerServiceClientConfiguration(api_version=api_version, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.bulk_runs = BulkRunsOperations(self._client, self._config, self._serialize, self._deserialize)
self.connection = ConnectionOperations(self._client, self._config, self._serialize, self._deserialize)
self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.flow_runtimes = FlowRuntimesOperations(self._client, self._config, self._serialize, self._deserialize)
self.flow_runtimes_workspace_independent = FlowRuntimesWorkspaceIndependentOperations(self._client, self._config, self._serialize, self._deserialize)
self.flows = FlowsOperations(self._client, self._config, self._serialize, self._deserialize)
self.flow_sessions = FlowSessionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.flows_provider = FlowsProviderOperations(self._client, self._config, self._serialize, self._deserialize)
self.tools = ToolsOperations(self._client, self._config, self._serialize, self._deserialize)
self.trace_sessions = TraceSessionsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AzureMachineLearningDesignerServiceClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
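# ----------------------------------------------------------------------------------
# Hand-written usage sketch (not produced by AutoRest and never invoked at import
# time). It illustrates one way to drive the async client defined above through its
# context-manager protocol; the base URL and request path below are placeholders.
# ----------------------------------------------------------------------------------
async def _example_usage() -> None:
    # Placeholder endpoint; real callers supply their service URL and credential
    # policies when constructing the client.
    async with AzureMachineLearningDesignerServiceClient(base_url="https://example.org") as client:
        # Operation groups are exposed as attributes, e.g. client.flows or client.bulk_runs.
        request = HttpRequest("GET", "/health")  # hypothetical path, for illustration only
        response = await client._send_request(request)
        print(response.status_code)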
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/_azure_machine_learning_designer_service_client.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/_azure_machine_learning_designer_service_client.py",
"repo_id": "promptflow",
"token_count": 1941
} | 18 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import ACIAdvanceSettings
from ._models_py3 import AEVAComputeConfiguration
from ._models_py3 import AEVAResourceConfiguration
from ._models_py3 import AISuperComputerConfiguration
from ._models_py3 import AISuperComputerScalePolicy
from ._models_py3 import AISuperComputerStorageReferenceConfiguration
from ._models_py3 import AKSAdvanceSettings
from ._models_py3 import AKSReplicaStatus
from ._models_py3 import AMLComputeConfiguration
from ._models_py3 import APCloudConfiguration
from ._models_py3 import Activate
from ._models_py3 import AdditionalErrorInfo
from ._models_py3 import AdhocTriggerScheduledCommandJobRequest
from ._models_py3 import AdhocTriggerScheduledSparkJobRequest
from ._models_py3 import AetherAPCloudConfiguration
from ._models_py3 import AetherAmlDataset
from ._models_py3 import AetherAmlSparkCloudSetting
from ._models_py3 import AetherArgumentAssignment
from ._models_py3 import AetherAssetDefinition
from ._models_py3 import AetherAssetOutputSettings
from ._models_py3 import AetherAutoFeaturizeConfiguration
from ._models_py3 import AetherAutoMLComponentConfiguration
from ._models_py3 import AetherAutoTrainConfiguration
from ._models_py3 import AetherAzureBlobReference
from ._models_py3 import AetherAzureDataLakeGen2Reference
from ._models_py3 import AetherAzureDataLakeReference
from ._models_py3 import AetherAzureDatabaseReference
from ._models_py3 import AetherAzureFilesReference
from ._models_py3 import AetherBatchAiComputeInfo
from ._models_py3 import AetherBuildArtifactInfo
from ._models_py3 import AetherCloudBuildDropPathInfo
from ._models_py3 import AetherCloudBuildInfo
from ._models_py3 import AetherCloudBuildQueueInfo
from ._models_py3 import AetherCloudPrioritySetting
from ._models_py3 import AetherCloudSettings
from ._models_py3 import AetherColumnTransformer
from ._models_py3 import AetherComputeConfiguration
from ._models_py3 import AetherComputeSetting
from ._models_py3 import AetherControlInput
from ._models_py3 import AetherControlOutput
from ._models_py3 import AetherCopyDataTask
from ._models_py3 import AetherCosmosReference
from ._models_py3 import AetherCreatedBy
from ._models_py3 import AetherCustomReference
from ._models_py3 import AetherDBFSReference
from ._models_py3 import AetherDataLocation
from ._models_py3 import AetherDataLocationReuseCalculationFields
from ._models_py3 import AetherDataPath
from ._models_py3 import AetherDataReference
from ._models_py3 import AetherDataSetDefinition
from ._models_py3 import AetherDataSetDefinitionValue
from ._models_py3 import AetherDataSettings
from ._models_py3 import AetherDataTransferCloudConfiguration
from ._models_py3 import AetherDataTransferSink
from ._models_py3 import AetherDataTransferSource
from ._models_py3 import AetherDataTransferV2CloudSetting
from ._models_py3 import AetherDatabaseSink
from ._models_py3 import AetherDatabaseSource
from ._models_py3 import AetherDatabricksComputeInfo
from ._models_py3 import AetherDatasetOutput
from ._models_py3 import AetherDatasetOutputOptions
from ._models_py3 import AetherDatasetRegistration
from ._models_py3 import AetherDatastoreSetting
from ._models_py3 import AetherDoWhileControlFlowInfo
from ._models_py3 import AetherDoWhileControlFlowRunSettings
from ._models_py3 import AetherDockerSettingConfiguration
from ._models_py3 import AetherEntityInterfaceDocumentation
from ._models_py3 import AetherEntrySetting
from ._models_py3 import AetherEnvironmentConfiguration
from ._models_py3 import AetherEsCloudConfiguration
from ._models_py3 import AetherExportDataTask
from ._models_py3 import AetherFeaturizationSettings
from ._models_py3 import AetherFileSystem
from ._models_py3 import AetherForecastHorizon
from ._models_py3 import AetherForecastingSettings
from ._models_py3 import AetherGeneralSettings
from ._models_py3 import AetherGlobsOptions
from ._models_py3 import AetherGraphControlNode
from ._models_py3 import AetherGraphControlReferenceNode
from ._models_py3 import AetherGraphDatasetNode
from ._models_py3 import AetherGraphEdge
from ._models_py3 import AetherGraphEntity
from ._models_py3 import AetherGraphModuleNode
from ._models_py3 import AetherGraphReferenceNode
from ._models_py3 import AetherHdfsReference
from ._models_py3 import AetherHdiClusterComputeInfo
from ._models_py3 import AetherHdiRunConfiguration
from ._models_py3 import AetherHyperDriveConfiguration
from ._models_py3 import AetherIdentitySetting
from ._models_py3 import AetherImportDataTask
from ._models_py3 import AetherInputSetting
from ._models_py3 import AetherInteractiveConfig
from ._models_py3 import AetherK8SConfiguration
from ._models_py3 import AetherLegacyDataPath
from ._models_py3 import AetherLimitSettings
from ._models_py3 import AetherMlcComputeInfo
from ._models_py3 import AetherModuleEntity
from ._models_py3 import AetherModuleExtendedProperties
from ._models_py3 import AetherNCrossValidations
from ._models_py3 import AetherOutputSetting
from ._models_py3 import AetherParallelForControlFlowInfo
from ._models_py3 import AetherParameterAssignment
from ._models_py3 import AetherPhillyHdfsReference
from ._models_py3 import AetherPortInfo
from ._models_py3 import AetherPriorityConfig
from ._models_py3 import AetherPriorityConfiguration
from ._models_py3 import AetherRegisteredDataSetReference
from ._models_py3 import AetherRemoteDockerComputeInfo
from ._models_py3 import AetherResourceAssignment
from ._models_py3 import AetherResourceAttributeAssignment
from ._models_py3 import AetherResourceAttributeDefinition
from ._models_py3 import AetherResourceConfig
from ._models_py3 import AetherResourceConfiguration
from ._models_py3 import AetherResourceModel
from ._models_py3 import AetherResourcesSetting
from ._models_py3 import AetherSavedDataSetReference
from ._models_py3 import AetherScopeCloudConfiguration
from ._models_py3 import AetherSeasonality
from ._models_py3 import AetherSqlDataPath
from ._models_py3 import AetherStackEnsembleSettings
from ._models_py3 import AetherStoredProcedureParameter
from ._models_py3 import AetherStructuredInterface
from ._models_py3 import AetherStructuredInterfaceInput
from ._models_py3 import AetherStructuredInterfaceOutput
from ._models_py3 import AetherStructuredInterfaceParameter
from ._models_py3 import AetherSubGraphConfiguration
from ._models_py3 import AetherSweepEarlyTerminationPolicy
from ._models_py3 import AetherSweepSettings
from ._models_py3 import AetherSweepSettingsLimits
from ._models_py3 import AetherTargetLags
from ._models_py3 import AetherTargetRollingWindowSize
from ._models_py3 import AetherTargetSelectorConfiguration
from ._models_py3 import AetherTestDataSettings
from ._models_py3 import AetherTorchDistributedConfiguration
from ._models_py3 import AetherTrainingOutput
from ._models_py3 import AetherTrainingSettings
from ._models_py3 import AetherUIAzureOpenAIDeploymentNameSelector
from ._models_py3 import AetherUIAzureOpenAIModelCapabilities
from ._models_py3 import AetherUIColumnPicker
from ._models_py3 import AetherUIJsonEditor
from ._models_py3 import AetherUIParameterHint
from ._models_py3 import AetherUIPromptFlowConnectionSelector
from ._models_py3 import AetherValidationDataSettings
from ._models_py3 import AetherVsoBuildArtifactInfo
from ._models_py3 import AetherVsoBuildDefinitionInfo
from ._models_py3 import AetherVsoBuildInfo
from ._models_py3 import AmlDataset
from ._models_py3 import AmlK8SConfiguration
from ._models_py3 import AmlK8SPriorityConfiguration
from ._models_py3 import AmlSparkCloudSetting
from ._models_py3 import ApiAndParameters
from ._models_py3 import ApplicationEndpointConfiguration
from ._models_py3 import ArgumentAssignment
from ._models_py3 import Asset
from ._models_py3 import AssetDefinition
from ._models_py3 import AssetNameAndVersionIdentifier
from ._models_py3 import AssetOutputSettings
from ._models_py3 import AssetOutputSettingsParameter
from ._models_py3 import AssetPublishResult
from ._models_py3 import AssetPublishSingleRegionResult
from ._models_py3 import AssetTypeMetaInfo
from ._models_py3 import AssetVersionPublishRequest
from ._models_py3 import AssignedUser
from ._models_py3 import AttachCosmosRequest
from ._models_py3 import AuthKeys
from ._models_py3 import AutoClusterComputeSpecification
from ._models_py3 import AutoDeleteSetting
from ._models_py3 import AutoFeaturizeConfiguration
from ._models_py3 import AutoMLComponentConfiguration
from ._models_py3 import AutoScaler
from ._models_py3 import AutoTrainConfiguration
from ._models_py3 import AutologgerSettings
from ._models_py3 import AvailabilityResponse
from ._models_py3 import AzureBlobReference
from ._models_py3 import AzureDataLakeGen2Reference
from ._models_py3 import AzureDataLakeReference
from ._models_py3 import AzureDatabaseReference
from ._models_py3 import AzureFilesReference
from ._models_py3 import AzureMLModuleVersionDescriptor
from ._models_py3 import AzureOpenAIDeploymentDto
from ._models_py3 import AzureOpenAIModelCapabilities
from ._models_py3 import BatchAiComputeInfo
from ._models_py3 import BatchDataInput
from ._models_py3 import BatchExportComponentSpecResponse
from ._models_py3 import BatchExportRawComponentResponse
from ._models_py3 import BatchGetComponentHashesRequest
from ._models_py3 import BatchGetComponentRequest
from ._models_py3 import Binding
from ._models_py3 import BulkTestDto
from ._models_py3 import CloudError
from ._models_py3 import CloudPrioritySetting
from ._models_py3 import CloudSettings
from ._models_py3 import ColumnTransformer
from ._models_py3 import CommandJob
from ._models_py3 import CommandJobLimits
from ._models_py3 import CommandReturnCodeConfig
from ._models_py3 import ComponentConfiguration
from ._models_py3 import ComponentInput
from ._models_py3 import ComponentJob
from ._models_py3 import ComponentJobInput
from ._models_py3 import ComponentJobOutput
from ._models_py3 import ComponentNameAndDefaultVersion
from ._models_py3 import ComponentNameMetaInfo
from ._models_py3 import ComponentOutput
from ._models_py3 import ComponentPreflightResult
from ._models_py3 import ComponentSpecMetaInfo
from ._models_py3 import ComponentUpdateRequest
from ._models_py3 import ComponentValidationRequest
from ._models_py3 import ComponentValidationResponse
from ._models_py3 import Compute
from ._models_py3 import ComputeConfiguration
from ._models_py3 import ComputeContract
from ._models_py3 import ComputeIdentityContract
from ._models_py3 import ComputeIdentityDto
from ._models_py3 import ComputeInfo
from ._models_py3 import ComputeProperties
from ._models_py3 import ComputeRPUserAssignedIdentity
from ._models_py3 import ComputeRequest
from ._models_py3 import ComputeSetting
from ._models_py3 import ComputeStatus
from ._models_py3 import ComputeStatusDetail
from ._models_py3 import ComputeWarning
from ._models_py3 import ConnectionConfigSpec
from ._models_py3 import ConnectionDto
from ._models_py3 import ConnectionEntity
from ._models_py3 import ConnectionOverrideSetting
from ._models_py3 import ConnectionSpec
from ._models_py3 import ContainerInstanceConfiguration
from ._models_py3 import ContainerRegistry
from ._models_py3 import ContainerResourceRequirements
from ._models_py3 import ControlInput
from ._models_py3 import ControlOutput
from ._models_py3 import CopyDataTask
from ._models_py3 import CreateFlowRequest
from ._models_py3 import CreateFlowRuntimeRequest
from ._models_py3 import CreateFlowSessionRequest
from ._models_py3 import CreateInferencePipelineRequest
from ._models_py3 import CreateOrUpdateConnectionRequest
from ._models_py3 import CreateOrUpdateConnectionRequestDto
from ._models_py3 import CreatePipelineDraftRequest
from ._models_py3 import CreatePipelineJobScheduleDto
from ._models_py3 import CreatePublishedPipelineRequest
from ._models_py3 import CreateRealTimeEndpointRequest
from ._models_py3 import CreatedBy
from ._models_py3 import CreatedFromDto
from ._models_py3 import CreationContext
from ._models_py3 import Cron
from ._models_py3 import CustomConnectionConfig
from ._models_py3 import CustomReference
from ._models_py3 import DBFSReference
from ._models_py3 import Data
from ._models_py3 import DataInfo
from ._models_py3 import DataLocation
from ._models_py3 import DataPath
from ._models_py3 import DataPathParameter
from ._models_py3 import DataPortDto
from ._models_py3 import DataReference
from ._models_py3 import DataReferenceConfiguration
from ._models_py3 import DataSetDefinition
from ._models_py3 import DataSetDefinitionValue
from ._models_py3 import DataSetPathParameter
from ._models_py3 import DataSettings
from ._models_py3 import DataTransferCloudConfiguration
from ._models_py3 import DataTransferSink
from ._models_py3 import DataTransferSource
from ._models_py3 import DataTransferV2CloudSetting
from ._models_py3 import DataTypeCreationInfo
from ._models_py3 import DatabaseSink
from ._models_py3 import DatabaseSource
from ._models_py3 import DatabricksComputeInfo
from ._models_py3 import DatabricksConfiguration
from ._models_py3 import DatacacheConfiguration
from ._models_py3 import DatasetIdentifier
from ._models_py3 import DatasetInputDetails
from ._models_py3 import DatasetLineage
from ._models_py3 import DatasetOutput
from ._models_py3 import DatasetOutputDetails
from ._models_py3 import DatasetOutputOptions
from ._models_py3 import DatasetRegistration
from ._models_py3 import DatasetRegistrationOptions
from ._models_py3 import DatastoreSetting
from ._models_py3 import DbfsStorageInfoDto
from ._models_py3 import DebugInfoResponse
from ._models_py3 import DeployFlowRequest
from ._models_py3 import DeploymentInfo
from ._models_py3 import DistributionConfiguration
from ._models_py3 import DistributionParameter
from ._models_py3 import DoWhileControlFlowInfo
from ._models_py3 import DoWhileControlFlowRunSettings
from ._models_py3 import DockerBuildContext
from ._models_py3 import DockerConfiguration
from ._models_py3 import DockerImagePlatform
from ._models_py3 import DockerSection
from ._models_py3 import DockerSettingConfiguration
from ._models_py3 import DownloadResourceInfo
from ._models_py3 import EPRPipelineRunErrorClassificationRequest
from ._models_py3 import EndpointSetting
from ._models_py3 import EntityInterface
from ._models_py3 import EntrySetting
from ._models_py3 import EnumParameterRule
from ._models_py3 import EnvironmentConfiguration
from ._models_py3 import EnvironmentDefinition
from ._models_py3 import EnvironmentDefinitionDto
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorResponse
from ._models_py3 import EsCloudConfiguration
from ._models_py3 import EvaluationFlowRunSettings
from ._models_py3 import ExampleRequest
from ._models_py3 import ExecutionContextDto
from ._models_py3 import ExecutionDataLocation
from ._models_py3 import ExecutionDataPath
from ._models_py3 import ExecutionGlobsOptions
from ._models_py3 import ExperimentComputeMetaInfo
from ._models_py3 import ExperimentInfo
from ._models_py3 import ExportComponentMetaInfo
from ._models_py3 import ExportDataTask
from ._models_py3 import FeaturizationSettings
from ._models_py3 import FeedDto
from ._models_py3 import FeedDtoSupportedAssetTypes
from ._models_py3 import FileSystem
from ._models_py3 import Flow
from ._models_py3 import FlowAnnotations
from ._models_py3 import FlowBaseDto
from ._models_py3 import FlowDto
from ._models_py3 import FlowEnvironment
from ._models_py3 import FlowFeature
from ._models_py3 import FlowFeatureState
from ._models_py3 import FlowGraph
from ._models_py3 import FlowGraphAnnotationNode
from ._models_py3 import FlowGraphLayout
from ._models_py3 import FlowGraphReference
from ._models_py3 import FlowIndexEntity
from ._models_py3 import FlowInputDefinition
from ._models_py3 import FlowNode
from ._models_py3 import FlowNodeLayout
from ._models_py3 import FlowNodeVariant
from ._models_py3 import FlowOutputDefinition
from ._models_py3 import FlowProperties
from ._models_py3 import FlowRunBasePath
from ._models_py3 import FlowRunInfo
from ._models_py3 import FlowRunResult
from ._models_py3 import FlowRunSettings
from ._models_py3 import FlowRunSettingsBase
from ._models_py3 import FlowRunStatusResponse
from ._models_py3 import FlowRuntimeCapability
from ._models_py3 import FlowRuntimeDto
from ._models_py3 import FlowSampleDto
from ._models_py3 import FlowSessionDto
from ._models_py3 import FlowSnapshot
from ._models_py3 import FlowSubmitRunSettings
from ._models_py3 import FlowTestInfo
from ._models_py3 import FlowTestStorageSetting
from ._models_py3 import FlowToolSettingParameter
from ._models_py3 import FlowToolsDto
from ._models_py3 import FlowVariantNode
from ._models_py3 import ForecastHorizon
from ._models_py3 import ForecastingSettings
from ._models_py3 import GeneralSettings
from ._models_py3 import GeneratePipelineComponentRequest
from ._models_py3 import GenerateToolMetaRequest
from ._models_py3 import GetDynamicListRequest
from ._models_py3 import GetRunDataResultDto
from ._models_py3 import GetTrainingSessionDto
from ._models_py3 import GlobalJobDispatcherConfiguration
from ._models_py3 import GlobsOptions
from ._models_py3 import GraphAnnotationNode
from ._models_py3 import GraphControlNode
from ._models_py3 import GraphControlReferenceNode
from ._models_py3 import GraphDatasetNode
from ._models_py3 import GraphDraftEntity
from ._models_py3 import GraphEdge
from ._models_py3 import GraphLayout
from ._models_py3 import GraphLayoutCreationInfo
from ._models_py3 import GraphModuleNode
from ._models_py3 import GraphModuleNodeRunSetting
from ._models_py3 import GraphModuleNodeUIInputSetting
from ._models_py3 import GraphNodeStatusInfo
from ._models_py3 import GraphReferenceNode
from ._models_py3 import HdfsReference
from ._models_py3 import HdiClusterComputeInfo
from ._models_py3 import HdiConfiguration
from ._models_py3 import HdiRunConfiguration
from ._models_py3 import HistoryConfiguration
from ._models_py3 import HyperDriveConfiguration
from ._models_py3 import ICheckableLongRunningOperationResponse
from ._models_py3 import IdentityConfiguration
from ._models_py3 import IdentitySetting
from ._models_py3 import ImportDataTask
from ._models_py3 import IndexedErrorResponse
from ._models_py3 import InitScriptInfoDto
from ._models_py3 import InnerErrorDetails
from ._models_py3 import InnerErrorResponse
from ._models_py3 import InputAsset
from ._models_py3 import InputData
from ._models_py3 import InputDataBinding
from ._models_py3 import InputDefinition
from ._models_py3 import InputOutputPortMetadata
from ._models_py3 import InputSetting
from ._models_py3 import IntellectualPropertyPublisherInformation
from ._models_py3 import InteractiveConfig
from ._models_py3 import InteractiveConfiguration
from ._models_py3 import JobCost
from ._models_py3 import JobEndpoint
from ._models_py3 import JobInput
from ._models_py3 import JobOutput
from ._models_py3 import JobOutputArtifacts
from ._models_py3 import JobScheduleDto
from ._models_py3 import K8SConfiguration
from ._models_py3 import KeyValuePairComponentNameMetaInfoErrorResponse
from ._models_py3 import KeyValuePairComponentNameMetaInfoModuleDto
from ._models_py3 import KeyValuePairStringObject
from ._models_py3 import KubernetesConfiguration
from ._models_py3 import Kwarg
from ._models_py3 import LegacyDataPath
from ._models_py3 import LimitSettings
from ._models_py3 import LinkedADBWorkspaceMetadata
from ._models_py3 import LinkedPipelineInfo
from ._models_py3 import LoadFlowAsComponentRequest
from ._models_py3 import LogRunTerminatedEventDto
from ._models_py3 import LongRunningOperationUriResponse
from ._models_py3 import LongRunningUpdateRegistryComponentRequest
from ._models_py3 import ManagedServiceIdentity
from ._models_py3 import MavenLibraryDto
from ._models_py3 import MetricProperties
from ._models_py3 import MetricSchemaDto
from ._models_py3 import MetricSchemaPropertyDto
from ._models_py3 import MetricV2Dto
from ._models_py3 import MetricV2Value
from ._models_py3 import MfeInternalAutologgerSettings
from ._models_py3 import MfeInternalIdentityConfiguration
from ._models_py3 import MfeInternalNodes
from ._models_py3 import MfeInternalOutputData
from ._models_py3 import MfeInternalSecretConfiguration
from ._models_py3 import MfeInternalUriReference
from ._models_py3 import MfeInternalV20211001ComponentJob
from ._models_py3 import MinMaxParameterRule
from ._models_py3 import MlcComputeInfo
from ._models_py3 import ModelDto
from ._models_py3 import ModelManagementErrorResponse
from ._models_py3 import ModifyPipelineJobScheduleDto
from ._models_py3 import ModuleDto
from ._models_py3 import ModuleDtoWithErrors
from ._models_py3 import ModuleDtoWithValidateStatus
from ._models_py3 import ModuleEntity
from ._models_py3 import ModulePythonInterface
from ._models_py3 import MpiConfiguration
from ._models_py3 import NCrossValidations
from ._models_py3 import Node
from ._models_py3 import NodeInputPort
from ._models_py3 import NodeLayout
from ._models_py3 import NodeOutputPort
from ._models_py3 import NodePortInterface
from ._models_py3 import NodeSource
from ._models_py3 import NodeTelemetryMetaInfo
from ._models_py3 import NodeVariant
from ._models_py3 import Nodes
from ._models_py3 import NoteBookTaskDto
from ._models_py3 import NotificationSetting
from ._models_py3 import ODataError
from ._models_py3 import ODataErrorDetail
from ._models_py3 import ODataErrorResponse
from ._models_py3 import ODataInnerError
from ._models_py3 import OutputData
from ._models_py3 import OutputDataBinding
from ._models_py3 import OutputDatasetLineage
from ._models_py3 import OutputDefinition
from ._models_py3 import OutputOptions
from ._models_py3 import OutputSetting
from ._models_py3 import OutputSettingSpec
from ._models_py3 import PaginatedDataInfoList
from ._models_py3 import PaginatedModelDtoList
from ._models_py3 import PaginatedModuleDtoList
from ._models_py3 import PaginatedPipelineDraftSummaryList
from ._models_py3 import PaginatedPipelineEndpointSummaryList
from ._models_py3 import PaginatedPipelineRunSummaryList
from ._models_py3 import PaginatedPublishedPipelineSummaryList
from ._models_py3 import ParallelForControlFlowInfo
from ._models_py3 import ParallelTaskConfiguration
from ._models_py3 import Parameter
from ._models_py3 import ParameterAssignment
from ._models_py3 import ParameterDefinition
from ._models_py3 import PatchFlowRequest
from ._models_py3 import Pipeline
from ._models_py3 import PipelineDraft
from ._models_py3 import PipelineDraftStepDetails
from ._models_py3 import PipelineDraftSummary
from ._models_py3 import PipelineEndpoint
from ._models_py3 import PipelineEndpointSummary
from ._models_py3 import PipelineGraph
from ._models_py3 import PipelineInput
from ._models_py3 import PipelineJob
from ._models_py3 import PipelineJobRuntimeBasicSettings
from ._models_py3 import PipelineJobScheduleDto
from ._models_py3 import PipelineOutput
from ._models_py3 import PipelineRun
from ._models_py3 import PipelineRunGraphDetail
from ._models_py3 import PipelineRunGraphStatus
from ._models_py3 import PipelineRunProfile
from ._models_py3 import PipelineRunStatus
from ._models_py3 import PipelineRunStepDetails
from ._models_py3 import PipelineRunSummary
from ._models_py3 import PipelineStatus
from ._models_py3 import PipelineStepRun
from ._models_py3 import PipelineStepRunOutputs
from ._models_py3 import PipelineSubDraft
from ._models_py3 import PolicyValidationResponse
from ._models_py3 import PortInfo
from ._models_py3 import PortOutputInfo
from ._models_py3 import PriorityConfig
from ._models_py3 import PriorityConfiguration
from ._models_py3 import PromoteDataSetRequest
from ._models_py3 import ProviderEntity
from ._models_py3 import PublishedPipeline
from ._models_py3 import PublishedPipelineSummary
from ._models_py3 import PyTorchConfiguration
from ._models_py3 import PythonInterfaceMapping
from ._models_py3 import PythonPyPiOrRCranLibraryDto
from ._models_py3 import PythonSection
from ._models_py3 import QueueingInfo
from ._models_py3 import RCranPackage
from ._models_py3 import RGitHubPackage
from ._models_py3 import RSection
from ._models_py3 import RawComponentDto
from ._models_py3 import RayConfiguration
from ._models_py3 import RealTimeEndpoint
from ._models_py3 import RealTimeEndpointInfo
from ._models_py3 import RealTimeEndpointStatus
from ._models_py3 import RealTimeEndpointSummary
from ._models_py3 import RealTimeEndpointTestRequest
from ._models_py3 import Recurrence
from ._models_py3 import RecurrencePattern
from ._models_py3 import RecurrenceSchedule
from ._models_py3 import RegenerateServiceKeysRequest
from ._models_py3 import RegisterComponentMetaInfo
from ._models_py3 import RegisterComponentMetaInfoExtraHashes
from ._models_py3 import RegisterComponentMetaInfoIdentifierHashes
from ._models_py3 import RegisterRegistryComponentMetaInfo
from ._models_py3 import RegisterRegistryComponentMetaInfoExtraHashes
from ._models_py3 import RegisterRegistryComponentMetaInfoIdentifierHashes
from ._models_py3 import RegisteredDataSetReference
from ._models_py3 import RegistrationOptions
from ._models_py3 import RegistryBlobReferenceData
from ._models_py3 import RegistryIdentity
from ._models_py3 import Relationship
from ._models_py3 import RemoteDockerComputeInfo
from ._models_py3 import ResourceConfig
from ._models_py3 import ResourceConfiguration
from ._models_py3 import ResourcesSetting
from ._models_py3 import RetrieveToolFuncResultRequest
from ._models_py3 import RetryConfiguration
from ._models_py3 import RootError
from ._models_py3 import RunAnnotations
from ._models_py3 import RunCommandsCommandResult
from ._models_py3 import RunConfiguration
from ._models_py3 import RunDatasetReference
from ._models_py3 import RunDefinition
from ._models_py3 import RunDetailsDto
from ._models_py3 import RunDetailsWarningDto
from ._models_py3 import RunDto
from ._models_py3 import RunIndexEntity
from ._models_py3 import RunIndexMetricSummary
from ._models_py3 import RunIndexMetricSummarySystemObject
from ._models_py3 import RunIndexResourceMetricSummary
from ._models_py3 import RunMetricDto
from ._models_py3 import RunMetricsTypesDto
from ._models_py3 import RunProperties
from ._models_py3 import RunSettingParameter
from ._models_py3 import RunSettingParameterAssignment
from ._models_py3 import RunSettingUIParameterHint
from ._models_py3 import RunStatusPeriod
from ._models_py3 import RunTypeV2
from ._models_py3 import RunTypeV2Index
from ._models_py3 import RuntimeConfiguration
from ._models_py3 import SampleMeta
from ._models_py3 import SavePipelineDraftRequest
from ._models_py3 import SavedDataSetReference
from ._models_py3 import ScheduleBase
from ._models_py3 import SchemaContractsCreatedBy
from ._models_py3 import ScopeCloudConfiguration
from ._models_py3 import Seasonality
from ._models_py3 import SecretConfiguration
from ._models_py3 import SegmentedResult1
from ._models_py3 import ServiceLogRequest
from ._models_py3 import SessionApplication
from ._models_py3 import SessionApplicationRunCommandResult
from ._models_py3 import SessionProperties
from ._models_py3 import SetupFlowSessionRequest
from ._models_py3 import SharingScope
from ._models_py3 import Snapshot
from ._models_py3 import SnapshotInfo
from ._models_py3 import SourceCodeDataReference
from ._models_py3 import SparkConfiguration
from ._models_py3 import SparkJarTaskDto
from ._models_py3 import SparkJob
from ._models_py3 import SparkJobEntry
from ._models_py3 import SparkMavenPackage
from ._models_py3 import SparkPythonTaskDto
from ._models_py3 import SparkResourceConfiguration
from ._models_py3 import SparkSection
from ._models_py3 import SparkSubmitTaskDto
from ._models_py3 import SqlDataPath
from ._models_py3 import StackEnsembleSettings
from ._models_py3 import StandbyPoolProperties
from ._models_py3 import StandbyPoolResourceStatus
from ._models_py3 import StartRunResult
from ._models_py3 import StepRunProfile
from ._models_py3 import StorageInfo
from ._models_py3 import StoredProcedureParameter
from ._models_py3 import Stream
from ._models_py3 import StructuredInterface
from ._models_py3 import StructuredInterfaceInput
from ._models_py3 import StructuredInterfaceOutput
from ._models_py3 import StructuredInterfaceParameter
from ._models_py3 import StudioMigrationInfo
from ._models_py3 import SubGraphConcatenateAssignment
from ._models_py3 import SubGraphConfiguration
from ._models_py3 import SubGraphConnectionInfo
from ._models_py3 import SubGraphDataPathParameterAssignment
from ._models_py3 import SubGraphInfo
from ._models_py3 import SubGraphParameterAssignment
from ._models_py3 import SubGraphPortInfo
from ._models_py3 import SubPipelineDefinition
from ._models_py3 import SubPipelineParameterAssignment
from ._models_py3 import SubPipelinesInfo
from ._models_py3 import SubStatusPeriod
from ._models_py3 import SubmitBulkRunRequest
from ._models_py3 import SubmitBulkRunResponse
from ._models_py3 import SubmitFlowRequest
from ._models_py3 import SubmitPipelineRunRequest
from ._models_py3 import SweepEarlyTerminationPolicy
from ._models_py3 import SweepSettings
from ._models_py3 import SweepSettingsLimits
from ._models_py3 import SystemData
from ._models_py3 import SystemMeta
from ._models_py3 import SystemMetaExtraHashes
from ._models_py3 import SystemMetaIdentifierHashes
from ._models_py3 import TargetLags
from ._models_py3 import TargetRollingWindowSize
from ._models_py3 import TargetSelectorConfiguration
from ._models_py3 import Task
from ._models_py3 import TaskControlFlowInfo
from ._models_py3 import TaskReuseInfo
from ._models_py3 import TensorflowConfiguration
from ._models_py3 import TestDataSettings
from ._models_py3 import Tool
from ._models_py3 import ToolFuncResponse
from ._models_py3 import ToolInputDynamicList
from ._models_py3 import ToolInputGeneratedBy
from ._models_py3 import ToolMetaDto
from ._models_py3 import ToolSetting
from ._models_py3 import ToolSourceMeta
from ._models_py3 import TorchDistributedConfiguration
from ._models_py3 import TrainingDiagnosticConfiguration
from ._models_py3 import TrainingOutput
from ._models_py3 import TrainingSettings
from ._models_py3 import TriggerAsyncOperationStatus
from ._models_py3 import TuningNodeRunSetting
from ._models_py3 import TuningNodeSetting
from ._models_py3 import TypedAssetReference
from ._models_py3 import UIAzureOpenAIDeploymentNameSelector
from ._models_py3 import UIAzureOpenAIModelCapabilities
from ._models_py3 import UIColumnPicker
from ._models_py3 import UIComputeSelection
from ._models_py3 import UIHyperparameterConfiguration
from ._models_py3 import UIInputSetting
from ._models_py3 import UIJsonEditor
from ._models_py3 import UIParameterHint
from ._models_py3 import UIPromptFlowConnectionSelector
from ._models_py3 import UIWidgetMetaInfo
from ._models_py3 import UIYamlEditor
from ._models_py3 import UnversionedEntityRequestDto
from ._models_py3 import UnversionedEntityResponseDto
from ._models_py3 import UnversionedRebuildIndexDto
from ._models_py3 import UnversionedRebuildResponseDto
from ._models_py3 import UpdateComponentRequest
from ._models_py3 import UpdateFlowRequest
from ._models_py3 import UpdateFlowRuntimeRequest
from ._models_py3 import UpdateFlowStatusRequest
from ._models_py3 import UpdateRegistryComponentRequest
from ._models_py3 import UploadOptions
from ._models_py3 import UriReference
from ._models_py3 import User
from ._models_py3 import UserAssignedIdentity
from ._models_py3 import ValidationDataSettings
from ._models_py3 import VariantIdentifier
from ._models_py3 import VariantNode
from ._models_py3 import Volume
from ._models_py3 import WebServiceComputeMetaInfo
from ._models_py3 import WebServicePort
from ._models_py3 import Webhook
from ._models_py3 import WorkspaceConnectionSpec
except (SyntaxError, ImportError):
from ._models import ACIAdvanceSettings # type: ignore
from ._models import AEVAComputeConfiguration # type: ignore
from ._models import AEVAResourceConfiguration # type: ignore
from ._models import AISuperComputerConfiguration # type: ignore
from ._models import AISuperComputerScalePolicy # type: ignore
from ._models import AISuperComputerStorageReferenceConfiguration # type: ignore
from ._models import AKSAdvanceSettings # type: ignore
from ._models import AKSReplicaStatus # type: ignore
from ._models import AMLComputeConfiguration # type: ignore
from ._models import APCloudConfiguration # type: ignore
from ._models import Activate # type: ignore
from ._models import AdditionalErrorInfo # type: ignore
from ._models import AdhocTriggerScheduledCommandJobRequest # type: ignore
from ._models import AdhocTriggerScheduledSparkJobRequest # type: ignore
from ._models import AetherAPCloudConfiguration # type: ignore
from ._models import AetherAmlDataset # type: ignore
from ._models import AetherAmlSparkCloudSetting # type: ignore
from ._models import AetherArgumentAssignment # type: ignore
from ._models import AetherAssetDefinition # type: ignore
from ._models import AetherAssetOutputSettings # type: ignore
from ._models import AetherAutoFeaturizeConfiguration # type: ignore
from ._models import AetherAutoMLComponentConfiguration # type: ignore
from ._models import AetherAutoTrainConfiguration # type: ignore
from ._models import AetherAzureBlobReference # type: ignore
from ._models import AetherAzureDataLakeGen2Reference # type: ignore
from ._models import AetherAzureDataLakeReference # type: ignore
from ._models import AetherAzureDatabaseReference # type: ignore
from ._models import AetherAzureFilesReference # type: ignore
from ._models import AetherBatchAiComputeInfo # type: ignore
from ._models import AetherBuildArtifactInfo # type: ignore
from ._models import AetherCloudBuildDropPathInfo # type: ignore
from ._models import AetherCloudBuildInfo # type: ignore
from ._models import AetherCloudBuildQueueInfo # type: ignore
from ._models import AetherCloudPrioritySetting # type: ignore
from ._models import AetherCloudSettings # type: ignore
from ._models import AetherColumnTransformer # type: ignore
from ._models import AetherComputeConfiguration # type: ignore
from ._models import AetherComputeSetting # type: ignore
from ._models import AetherControlInput # type: ignore
from ._models import AetherControlOutput # type: ignore
from ._models import AetherCopyDataTask # type: ignore
from ._models import AetherCosmosReference # type: ignore
from ._models import AetherCreatedBy # type: ignore
from ._models import AetherCustomReference # type: ignore
from ._models import AetherDBFSReference # type: ignore
from ._models import AetherDataLocation # type: ignore
from ._models import AetherDataLocationReuseCalculationFields # type: ignore
from ._models import AetherDataPath # type: ignore
from ._models import AetherDataReference # type: ignore
from ._models import AetherDataSetDefinition # type: ignore
from ._models import AetherDataSetDefinitionValue # type: ignore
from ._models import AetherDataSettings # type: ignore
from ._models import AetherDataTransferCloudConfiguration # type: ignore
from ._models import AetherDataTransferSink # type: ignore
from ._models import AetherDataTransferSource # type: ignore
from ._models import AetherDataTransferV2CloudSetting # type: ignore
from ._models import AetherDatabaseSink # type: ignore
from ._models import AetherDatabaseSource # type: ignore
from ._models import AetherDatabricksComputeInfo # type: ignore
from ._models import AetherDatasetOutput # type: ignore
from ._models import AetherDatasetOutputOptions # type: ignore
from ._models import AetherDatasetRegistration # type: ignore
from ._models import AetherDatastoreSetting # type: ignore
from ._models import AetherDoWhileControlFlowInfo # type: ignore
from ._models import AetherDoWhileControlFlowRunSettings # type: ignore
from ._models import AetherDockerSettingConfiguration # type: ignore
from ._models import AetherEntityInterfaceDocumentation # type: ignore
from ._models import AetherEntrySetting # type: ignore
from ._models import AetherEnvironmentConfiguration # type: ignore
from ._models import AetherEsCloudConfiguration # type: ignore
from ._models import AetherExportDataTask # type: ignore
from ._models import AetherFeaturizationSettings # type: ignore
from ._models import AetherFileSystem # type: ignore
from ._models import AetherForecastHorizon # type: ignore
from ._models import AetherForecastingSettings # type: ignore
from ._models import AetherGeneralSettings # type: ignore
from ._models import AetherGlobsOptions # type: ignore
from ._models import AetherGraphControlNode # type: ignore
from ._models import AetherGraphControlReferenceNode # type: ignore
from ._models import AetherGraphDatasetNode # type: ignore
from ._models import AetherGraphEdge # type: ignore
from ._models import AetherGraphEntity # type: ignore
from ._models import AetherGraphModuleNode # type: ignore
from ._models import AetherGraphReferenceNode # type: ignore
from ._models import AetherHdfsReference # type: ignore
from ._models import AetherHdiClusterComputeInfo # type: ignore
from ._models import AetherHdiRunConfiguration # type: ignore
from ._models import AetherHyperDriveConfiguration # type: ignore
from ._models import AetherIdentitySetting # type: ignore
from ._models import AetherImportDataTask # type: ignore
from ._models import AetherInputSetting # type: ignore
from ._models import AetherInteractiveConfig # type: ignore
from ._models import AetherK8SConfiguration # type: ignore
from ._models import AetherLegacyDataPath # type: ignore
from ._models import AetherLimitSettings # type: ignore
from ._models import AetherMlcComputeInfo # type: ignore
from ._models import AetherModuleEntity # type: ignore
from ._models import AetherModuleExtendedProperties # type: ignore
from ._models import AetherNCrossValidations # type: ignore
from ._models import AetherOutputSetting # type: ignore
from ._models import AetherParallelForControlFlowInfo # type: ignore
from ._models import AetherParameterAssignment # type: ignore
from ._models import AetherPhillyHdfsReference # type: ignore
from ._models import AetherPortInfo # type: ignore
from ._models import AetherPriorityConfig # type: ignore
from ._models import AetherPriorityConfiguration # type: ignore
from ._models import AetherRegisteredDataSetReference # type: ignore
from ._models import AetherRemoteDockerComputeInfo # type: ignore
from ._models import AetherResourceAssignment # type: ignore
from ._models import AetherResourceAttributeAssignment # type: ignore
from ._models import AetherResourceAttributeDefinition # type: ignore
from ._models import AetherResourceConfig # type: ignore
from ._models import AetherResourceConfiguration # type: ignore
from ._models import AetherResourceModel # type: ignore
from ._models import AetherResourcesSetting # type: ignore
from ._models import AetherSavedDataSetReference # type: ignore
from ._models import AetherScopeCloudConfiguration # type: ignore
from ._models import AetherSeasonality # type: ignore
from ._models import AetherSqlDataPath # type: ignore
from ._models import AetherStackEnsembleSettings # type: ignore
from ._models import AetherStoredProcedureParameter # type: ignore
from ._models import AetherStructuredInterface # type: ignore
from ._models import AetherStructuredInterfaceInput # type: ignore
from ._models import AetherStructuredInterfaceOutput # type: ignore
from ._models import AetherStructuredInterfaceParameter # type: ignore
from ._models import AetherSubGraphConfiguration # type: ignore
from ._models import AetherSweepEarlyTerminationPolicy # type: ignore
from ._models import AetherSweepSettings # type: ignore
from ._models import AetherSweepSettingsLimits # type: ignore
from ._models import AetherTargetLags # type: ignore
from ._models import AetherTargetRollingWindowSize # type: ignore
from ._models import AetherTargetSelectorConfiguration # type: ignore
from ._models import AetherTestDataSettings # type: ignore
from ._models import AetherTorchDistributedConfiguration # type: ignore
from ._models import AetherTrainingOutput # type: ignore
from ._models import AetherTrainingSettings # type: ignore
from ._models import AetherUIAzureOpenAIDeploymentNameSelector # type: ignore
from ._models import AetherUIAzureOpenAIModelCapabilities # type: ignore
from ._models import AetherUIColumnPicker # type: ignore
from ._models import AetherUIJsonEditor # type: ignore
from ._models import AetherUIParameterHint # type: ignore
from ._models import AetherUIPromptFlowConnectionSelector # type: ignore
from ._models import AetherValidationDataSettings # type: ignore
from ._models import AetherVsoBuildArtifactInfo # type: ignore
from ._models import AetherVsoBuildDefinitionInfo # type: ignore
from ._models import AetherVsoBuildInfo # type: ignore
from ._models import AmlDataset # type: ignore
from ._models import AmlK8SConfiguration # type: ignore
from ._models import AmlK8SPriorityConfiguration # type: ignore
from ._models import AmlSparkCloudSetting # type: ignore
from ._models import ApiAndParameters # type: ignore
from ._models import ApplicationEndpointConfiguration # type: ignore
from ._models import ArgumentAssignment # type: ignore
from ._models import Asset # type: ignore
from ._models import AssetDefinition # type: ignore
from ._models import AssetNameAndVersionIdentifier # type: ignore
from ._models import AssetOutputSettings # type: ignore
from ._models import AssetOutputSettingsParameter # type: ignore
from ._models import AssetPublishResult # type: ignore
from ._models import AssetPublishSingleRegionResult # type: ignore
from ._models import AssetTypeMetaInfo # type: ignore
from ._models import AssetVersionPublishRequest # type: ignore
from ._models import AssignedUser # type: ignore
from ._models import AttachCosmosRequest # type: ignore
from ._models import AuthKeys # type: ignore
from ._models import AutoClusterComputeSpecification # type: ignore
from ._models import AutoDeleteSetting # type: ignore
from ._models import AutoFeaturizeConfiguration # type: ignore
from ._models import AutoMLComponentConfiguration # type: ignore
from ._models import AutoScaler # type: ignore
from ._models import AutoTrainConfiguration # type: ignore
from ._models import AutologgerSettings # type: ignore
from ._models import AvailabilityResponse # type: ignore
from ._models import AzureBlobReference # type: ignore
from ._models import AzureDataLakeGen2Reference # type: ignore
from ._models import AzureDataLakeReference # type: ignore
from ._models import AzureDatabaseReference # type: ignore
from ._models import AzureFilesReference # type: ignore
from ._models import AzureMLModuleVersionDescriptor # type: ignore
from ._models import AzureOpenAIDeploymentDto # type: ignore
from ._models import AzureOpenAIModelCapabilities # type: ignore
from ._models import BatchAiComputeInfo # type: ignore
from ._models import BatchDataInput # type: ignore
from ._models import BatchExportComponentSpecResponse # type: ignore
from ._models import BatchExportRawComponentResponse # type: ignore
from ._models import BatchGetComponentHashesRequest # type: ignore
from ._models import BatchGetComponentRequest # type: ignore
from ._models import Binding # type: ignore
from ._models import BulkTestDto # type: ignore
from ._models import CloudError # type: ignore
from ._models import CloudPrioritySetting # type: ignore
from ._models import CloudSettings # type: ignore
from ._models import ColumnTransformer # type: ignore
from ._models import CommandJob # type: ignore
from ._models import CommandJobLimits # type: ignore
from ._models import CommandReturnCodeConfig # type: ignore
from ._models import ComponentConfiguration # type: ignore
from ._models import ComponentInput # type: ignore
from ._models import ComponentJob # type: ignore
from ._models import ComponentJobInput # type: ignore
from ._models import ComponentJobOutput # type: ignore
from ._models import ComponentNameAndDefaultVersion # type: ignore
from ._models import ComponentNameMetaInfo # type: ignore
from ._models import ComponentOutput # type: ignore
from ._models import ComponentPreflightResult # type: ignore
from ._models import ComponentSpecMetaInfo # type: ignore
from ._models import ComponentUpdateRequest # type: ignore
from ._models import ComponentValidationRequest # type: ignore
from ._models import ComponentValidationResponse # type: ignore
from ._models import Compute # type: ignore
from ._models import ComputeConfiguration # type: ignore
from ._models import ComputeContract # type: ignore
from ._models import ComputeIdentityContract # type: ignore
from ._models import ComputeIdentityDto # type: ignore
from ._models import ComputeInfo # type: ignore
from ._models import ComputeProperties # type: ignore
from ._models import ComputeRPUserAssignedIdentity # type: ignore
from ._models import ComputeRequest # type: ignore
from ._models import ComputeSetting # type: ignore
from ._models import ComputeStatus # type: ignore
from ._models import ComputeStatusDetail # type: ignore
from ._models import ComputeWarning # type: ignore
from ._models import ConnectionConfigSpec # type: ignore
from ._models import ConnectionDto # type: ignore
from ._models import ConnectionEntity # type: ignore
from ._models import ConnectionOverrideSetting # type: ignore
from ._models import ConnectionSpec # type: ignore
from ._models import ContainerInstanceConfiguration # type: ignore
from ._models import ContainerRegistry # type: ignore
from ._models import ContainerResourceRequirements # type: ignore
from ._models import ControlInput # type: ignore
from ._models import ControlOutput # type: ignore
from ._models import CopyDataTask # type: ignore
from ._models import CreateFlowRequest # type: ignore
from ._models import CreateFlowRuntimeRequest # type: ignore
from ._models import CreateFlowSessionRequest # type: ignore
from ._models import CreateInferencePipelineRequest # type: ignore
from ._models import CreateOrUpdateConnectionRequest # type: ignore
from ._models import CreateOrUpdateConnectionRequestDto # type: ignore
from ._models import CreatePipelineDraftRequest # type: ignore
from ._models import CreatePipelineJobScheduleDto # type: ignore
from ._models import CreatePublishedPipelineRequest # type: ignore
from ._models import CreateRealTimeEndpointRequest # type: ignore
from ._models import CreatedBy # type: ignore
from ._models import CreatedFromDto # type: ignore
from ._models import CreationContext # type: ignore
from ._models import Cron # type: ignore
from ._models import CustomConnectionConfig # type: ignore
from ._models import CustomReference # type: ignore
from ._models import DBFSReference # type: ignore
from ._models import Data # type: ignore
from ._models import DataInfo # type: ignore
from ._models import DataLocation # type: ignore
from ._models import DataPath # type: ignore
from ._models import DataPathParameter # type: ignore
from ._models import DataPortDto # type: ignore
from ._models import DataReference # type: ignore
from ._models import DataReferenceConfiguration # type: ignore
from ._models import DataSetDefinition # type: ignore
from ._models import DataSetDefinitionValue # type: ignore
from ._models import DataSetPathParameter # type: ignore
from ._models import DataSettings # type: ignore
from ._models import DataTransferCloudConfiguration # type: ignore
from ._models import DataTransferSink # type: ignore
from ._models import DataTransferSource # type: ignore
from ._models import DataTransferV2CloudSetting # type: ignore
from ._models import DataTypeCreationInfo # type: ignore
from ._models import DatabaseSink # type: ignore
from ._models import DatabaseSource # type: ignore
from ._models import DatabricksComputeInfo # type: ignore
from ._models import DatabricksConfiguration # type: ignore
from ._models import DatacacheConfiguration # type: ignore
from ._models import DatasetIdentifier # type: ignore
from ._models import DatasetInputDetails # type: ignore
from ._models import DatasetLineage # type: ignore
from ._models import DatasetOutput # type: ignore
from ._models import DatasetOutputDetails # type: ignore
from ._models import DatasetOutputOptions # type: ignore
from ._models import DatasetRegistration # type: ignore
from ._models import DatasetRegistrationOptions # type: ignore
from ._models import DatastoreSetting # type: ignore
from ._models import DbfsStorageInfoDto # type: ignore
from ._models import DebugInfoResponse # type: ignore
from ._models import DeployFlowRequest # type: ignore
from ._models import DeploymentInfo # type: ignore
from ._models import DistributionConfiguration # type: ignore
from ._models import DistributionParameter # type: ignore
from ._models import DoWhileControlFlowInfo # type: ignore
from ._models import DoWhileControlFlowRunSettings # type: ignore
from ._models import DockerBuildContext # type: ignore
from ._models import DockerConfiguration # type: ignore
from ._models import DockerImagePlatform # type: ignore
from ._models import DockerSection # type: ignore
from ._models import DockerSettingConfiguration # type: ignore
from ._models import DownloadResourceInfo # type: ignore
from ._models import EPRPipelineRunErrorClassificationRequest # type: ignore
from ._models import EndpointSetting # type: ignore
from ._models import EntityInterface # type: ignore
from ._models import EntrySetting # type: ignore
from ._models import EnumParameterRule # type: ignore
from ._models import EnvironmentConfiguration # type: ignore
from ._models import EnvironmentDefinition # type: ignore
from ._models import EnvironmentDefinitionDto # type: ignore
from ._models import ErrorAdditionalInfo # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import EsCloudConfiguration # type: ignore
from ._models import EvaluationFlowRunSettings # type: ignore
from ._models import ExampleRequest # type: ignore
from ._models import ExecutionContextDto # type: ignore
from ._models import ExecutionDataLocation # type: ignore
from ._models import ExecutionDataPath # type: ignore
from ._models import ExecutionGlobsOptions # type: ignore
from ._models import ExperimentComputeMetaInfo # type: ignore
from ._models import ExperimentInfo # type: ignore
from ._models import ExportComponentMetaInfo # type: ignore
from ._models import ExportDataTask # type: ignore
from ._models import FeaturizationSettings # type: ignore
from ._models import FeedDto # type: ignore
from ._models import FeedDtoSupportedAssetTypes # type: ignore
from ._models import FileSystem # type: ignore
from ._models import Flow # type: ignore
from ._models import FlowAnnotations # type: ignore
from ._models import FlowBaseDto # type: ignore
from ._models import FlowDto # type: ignore
from ._models import FlowEnvironment # type: ignore
from ._models import FlowFeature # type: ignore
from ._models import FlowFeatureState # type: ignore
from ._models import FlowGraph # type: ignore
from ._models import FlowGraphAnnotationNode # type: ignore
from ._models import FlowGraphLayout # type: ignore
from ._models import FlowGraphReference # type: ignore
from ._models import FlowIndexEntity # type: ignore
from ._models import FlowInputDefinition # type: ignore
from ._models import FlowNode # type: ignore
from ._models import FlowNodeLayout # type: ignore
from ._models import FlowNodeVariant # type: ignore
from ._models import FlowOutputDefinition # type: ignore
from ._models import FlowProperties # type: ignore
from ._models import FlowRunBasePath # type: ignore
from ._models import FlowRunInfo # type: ignore
from ._models import FlowRunResult # type: ignore
from ._models import FlowRunSettings # type: ignore
from ._models import FlowRunSettingsBase # type: ignore
from ._models import FlowRunStatusResponse # type: ignore
from ._models import FlowRuntimeCapability # type: ignore
from ._models import FlowRuntimeDto # type: ignore
from ._models import FlowSampleDto # type: ignore
from ._models import FlowSessionDto # type: ignore
from ._models import FlowSnapshot # type: ignore
from ._models import FlowSubmitRunSettings # type: ignore
from ._models import FlowTestInfo # type: ignore
from ._models import FlowTestStorageSetting # type: ignore
from ._models import FlowToolSettingParameter # type: ignore
from ._models import FlowToolsDto # type: ignore
from ._models import FlowVariantNode # type: ignore
from ._models import ForecastHorizon # type: ignore
from ._models import ForecastingSettings # type: ignore
from ._models import GeneralSettings # type: ignore
from ._models import GeneratePipelineComponentRequest # type: ignore
from ._models import GenerateToolMetaRequest # type: ignore
from ._models import GetDynamicListRequest # type: ignore
from ._models import GetRunDataResultDto # type: ignore
from ._models import GetTrainingSessionDto # type: ignore
from ._models import GlobalJobDispatcherConfiguration # type: ignore
from ._models import GlobsOptions # type: ignore
from ._models import GraphAnnotationNode # type: ignore
from ._models import GraphControlNode # type: ignore
from ._models import GraphControlReferenceNode # type: ignore
from ._models import GraphDatasetNode # type: ignore
from ._models import GraphDraftEntity # type: ignore
from ._models import GraphEdge # type: ignore
from ._models import GraphLayout # type: ignore
from ._models import GraphLayoutCreationInfo # type: ignore
from ._models import GraphModuleNode # type: ignore
from ._models import GraphModuleNodeRunSetting # type: ignore
from ._models import GraphModuleNodeUIInputSetting # type: ignore
from ._models import GraphNodeStatusInfo # type: ignore
from ._models import GraphReferenceNode # type: ignore
from ._models import HdfsReference # type: ignore
from ._models import HdiClusterComputeInfo # type: ignore
from ._models import HdiConfiguration # type: ignore
from ._models import HdiRunConfiguration # type: ignore
from ._models import HistoryConfiguration # type: ignore
from ._models import HyperDriveConfiguration # type: ignore
from ._models import ICheckableLongRunningOperationResponse # type: ignore
from ._models import IdentityConfiguration # type: ignore
from ._models import IdentitySetting # type: ignore
from ._models import ImportDataTask # type: ignore
from ._models import IndexedErrorResponse # type: ignore
from ._models import InitScriptInfoDto # type: ignore
from ._models import InnerErrorDetails # type: ignore
from ._models import InnerErrorResponse # type: ignore
from ._models import InputAsset # type: ignore
from ._models import InputData # type: ignore
from ._models import InputDataBinding # type: ignore
from ._models import InputDefinition # type: ignore
from ._models import InputOutputPortMetadata # type: ignore
from ._models import InputSetting # type: ignore
from ._models import IntellectualPropertyPublisherInformation # type: ignore
from ._models import InteractiveConfig # type: ignore
from ._models import InteractiveConfiguration # type: ignore
from ._models import JobCost # type: ignore
from ._models import JobEndpoint # type: ignore
from ._models import JobInput # type: ignore
from ._models import JobOutput # type: ignore
from ._models import JobOutputArtifacts # type: ignore
from ._models import JobScheduleDto # type: ignore
from ._models import K8SConfiguration # type: ignore
from ._models import KeyValuePairComponentNameMetaInfoErrorResponse # type: ignore
from ._models import KeyValuePairComponentNameMetaInfoModuleDto # type: ignore
from ._models import KeyValuePairStringObject # type: ignore
from ._models import KubernetesConfiguration # type: ignore
from ._models import Kwarg # type: ignore
from ._models import LegacyDataPath # type: ignore
from ._models import LimitSettings # type: ignore
from ._models import LinkedADBWorkspaceMetadata # type: ignore
from ._models import LinkedPipelineInfo # type: ignore
from ._models import LoadFlowAsComponentRequest # type: ignore
from ._models import LogRunTerminatedEventDto # type: ignore
from ._models import LongRunningOperationUriResponse # type: ignore
from ._models import LongRunningUpdateRegistryComponentRequest # type: ignore
from ._models import ManagedServiceIdentity # type: ignore
from ._models import MavenLibraryDto # type: ignore
from ._models import MetricProperties # type: ignore
from ._models import MetricSchemaDto # type: ignore
from ._models import MetricSchemaPropertyDto # type: ignore
from ._models import MetricV2Dto # type: ignore
from ._models import MetricV2Value # type: ignore
from ._models import MfeInternalAutologgerSettings # type: ignore
from ._models import MfeInternalIdentityConfiguration # type: ignore
from ._models import MfeInternalNodes # type: ignore
from ._models import MfeInternalOutputData # type: ignore
from ._models import MfeInternalSecretConfiguration # type: ignore
from ._models import MfeInternalUriReference # type: ignore
from ._models import MfeInternalV20211001ComponentJob # type: ignore
from ._models import MinMaxParameterRule # type: ignore
from ._models import MlcComputeInfo # type: ignore
from ._models import ModelDto # type: ignore
from ._models import ModelManagementErrorResponse # type: ignore
from ._models import ModifyPipelineJobScheduleDto # type: ignore
from ._models import ModuleDto # type: ignore
from ._models import ModuleDtoWithErrors # type: ignore
from ._models import ModuleDtoWithValidateStatus # type: ignore
from ._models import ModuleEntity # type: ignore
from ._models import ModulePythonInterface # type: ignore
from ._models import MpiConfiguration # type: ignore
from ._models import NCrossValidations # type: ignore
from ._models import Node # type: ignore
from ._models import NodeInputPort # type: ignore
from ._models import NodeLayout # type: ignore
from ._models import NodeOutputPort # type: ignore
from ._models import NodePortInterface # type: ignore
from ._models import NodeSource # type: ignore
from ._models import NodeTelemetryMetaInfo # type: ignore
from ._models import NodeVariant # type: ignore
from ._models import Nodes # type: ignore
from ._models import NoteBookTaskDto # type: ignore
from ._models import NotificationSetting # type: ignore
from ._models import ODataError # type: ignore
from ._models import ODataErrorDetail # type: ignore
from ._models import ODataErrorResponse # type: ignore
from ._models import ODataInnerError # type: ignore
from ._models import OutputData # type: ignore
from ._models import OutputDataBinding # type: ignore
from ._models import OutputDatasetLineage # type: ignore
from ._models import OutputDefinition # type: ignore
from ._models import OutputOptions # type: ignore
from ._models import OutputSetting # type: ignore
from ._models import OutputSettingSpec # type: ignore
from ._models import PaginatedDataInfoList # type: ignore
from ._models import PaginatedModelDtoList # type: ignore
from ._models import PaginatedModuleDtoList # type: ignore
from ._models import PaginatedPipelineDraftSummaryList # type: ignore
from ._models import PaginatedPipelineEndpointSummaryList # type: ignore
from ._models import PaginatedPipelineRunSummaryList # type: ignore
from ._models import PaginatedPublishedPipelineSummaryList # type: ignore
from ._models import ParallelForControlFlowInfo # type: ignore
from ._models import ParallelTaskConfiguration # type: ignore
from ._models import Parameter # type: ignore
from ._models import ParameterAssignment # type: ignore
from ._models import ParameterDefinition # type: ignore
from ._models import PatchFlowRequest # type: ignore
from ._models import Pipeline # type: ignore
from ._models import PipelineDraft # type: ignore
from ._models import PipelineDraftStepDetails # type: ignore
from ._models import PipelineDraftSummary # type: ignore
from ._models import PipelineEndpoint # type: ignore
from ._models import PipelineEndpointSummary # type: ignore
from ._models import PipelineGraph # type: ignore
from ._models import PipelineInput # type: ignore
from ._models import PipelineJob # type: ignore
from ._models import PipelineJobRuntimeBasicSettings # type: ignore
from ._models import PipelineJobScheduleDto # type: ignore
from ._models import PipelineOutput # type: ignore
from ._models import PipelineRun # type: ignore
from ._models import PipelineRunGraphDetail # type: ignore
from ._models import PipelineRunGraphStatus # type: ignore
from ._models import PipelineRunProfile # type: ignore
from ._models import PipelineRunStatus # type: ignore
from ._models import PipelineRunStepDetails # type: ignore
from ._models import PipelineRunSummary # type: ignore
from ._models import PipelineStatus # type: ignore
from ._models import PipelineStepRun # type: ignore
from ._models import PipelineStepRunOutputs # type: ignore
from ._models import PipelineSubDraft # type: ignore
from ._models import PolicyValidationResponse # type: ignore
from ._models import PortInfo # type: ignore
from ._models import PortOutputInfo # type: ignore
from ._models import PriorityConfig # type: ignore
from ._models import PriorityConfiguration # type: ignore
from ._models import PromoteDataSetRequest # type: ignore
from ._models import ProviderEntity # type: ignore
from ._models import PublishedPipeline # type: ignore
from ._models import PublishedPipelineSummary # type: ignore
from ._models import PyTorchConfiguration # type: ignore
from ._models import PythonInterfaceMapping # type: ignore
from ._models import PythonPyPiOrRCranLibraryDto # type: ignore
from ._models import PythonSection # type: ignore
from ._models import QueueingInfo # type: ignore
from ._models import RCranPackage # type: ignore
from ._models import RGitHubPackage # type: ignore
from ._models import RSection # type: ignore
from ._models import RawComponentDto # type: ignore
from ._models import RayConfiguration # type: ignore
from ._models import RealTimeEndpoint # type: ignore
from ._models import RealTimeEndpointInfo # type: ignore
from ._models import RealTimeEndpointStatus # type: ignore
from ._models import RealTimeEndpointSummary # type: ignore
from ._models import RealTimeEndpointTestRequest # type: ignore
from ._models import Recurrence # type: ignore
from ._models import RecurrencePattern # type: ignore
from ._models import RecurrenceSchedule # type: ignore
from ._models import RegenerateServiceKeysRequest # type: ignore
from ._models import RegisterComponentMetaInfo # type: ignore
from ._models import RegisterComponentMetaInfoExtraHashes # type: ignore
from ._models import RegisterComponentMetaInfoIdentifierHashes # type: ignore
from ._models import RegisterRegistryComponentMetaInfo # type: ignore
from ._models import RegisterRegistryComponentMetaInfoExtraHashes # type: ignore
from ._models import RegisterRegistryComponentMetaInfoIdentifierHashes # type: ignore
from ._models import RegisteredDataSetReference # type: ignore
from ._models import RegistrationOptions # type: ignore
from ._models import RegistryBlobReferenceData # type: ignore
from ._models import RegistryIdentity # type: ignore
from ._models import Relationship # type: ignore
from ._models import RemoteDockerComputeInfo # type: ignore
from ._models import ResourceConfig # type: ignore
from ._models import ResourceConfiguration # type: ignore
from ._models import ResourcesSetting # type: ignore
from ._models import RetrieveToolFuncResultRequest # type: ignore
from ._models import RetryConfiguration # type: ignore
from ._models import RootError # type: ignore
from ._models import RunAnnotations # type: ignore
from ._models import RunCommandsCommandResult # type: ignore
from ._models import RunConfiguration # type: ignore
from ._models import RunDatasetReference # type: ignore
from ._models import RunDefinition # type: ignore
from ._models import RunDetailsDto # type: ignore
from ._models import RunDetailsWarningDto # type: ignore
from ._models import RunDto # type: ignore
from ._models import RunIndexEntity # type: ignore
from ._models import RunIndexMetricSummary # type: ignore
from ._models import RunIndexMetricSummarySystemObject # type: ignore
from ._models import RunIndexResourceMetricSummary # type: ignore
from ._models import RunMetricDto # type: ignore
from ._models import RunMetricsTypesDto # type: ignore
from ._models import RunProperties # type: ignore
from ._models import RunSettingParameter # type: ignore
from ._models import RunSettingParameterAssignment # type: ignore
from ._models import RunSettingUIParameterHint # type: ignore
from ._models import RunStatusPeriod # type: ignore
from ._models import RunTypeV2 # type: ignore
from ._models import RunTypeV2Index # type: ignore
from ._models import RuntimeConfiguration # type: ignore
from ._models import SampleMeta # type: ignore
from ._models import SavePipelineDraftRequest # type: ignore
from ._models import SavedDataSetReference # type: ignore
from ._models import ScheduleBase # type: ignore
from ._models import SchemaContractsCreatedBy # type: ignore
from ._models import ScopeCloudConfiguration # type: ignore
from ._models import Seasonality # type: ignore
from ._models import SecretConfiguration # type: ignore
from ._models import SegmentedResult1 # type: ignore
from ._models import ServiceLogRequest # type: ignore
from ._models import SessionApplication # type: ignore
from ._models import SessionApplicationRunCommandResult # type: ignore
from ._models import SessionProperties # type: ignore
from ._models import SetupFlowSessionRequest # type: ignore
from ._models import SharingScope # type: ignore
from ._models import Snapshot # type: ignore
from ._models import SnapshotInfo # type: ignore
from ._models import SourceCodeDataReference # type: ignore
from ._models import SparkConfiguration # type: ignore
from ._models import SparkJarTaskDto # type: ignore
from ._models import SparkJob # type: ignore
from ._models import SparkJobEntry # type: ignore
from ._models import SparkMavenPackage # type: ignore
from ._models import SparkPythonTaskDto # type: ignore
from ._models import SparkResourceConfiguration # type: ignore
from ._models import SparkSection # type: ignore
from ._models import SparkSubmitTaskDto # type: ignore
from ._models import SqlDataPath # type: ignore
from ._models import StackEnsembleSettings # type: ignore
from ._models import StandbyPoolProperties # type: ignore
from ._models import StandbyPoolResourceStatus # type: ignore
from ._models import StartRunResult # type: ignore
from ._models import StepRunProfile # type: ignore
from ._models import StorageInfo # type: ignore
from ._models import StoredProcedureParameter # type: ignore
from ._models import Stream # type: ignore
from ._models import StructuredInterface # type: ignore
from ._models import StructuredInterfaceInput # type: ignore
from ._models import StructuredInterfaceOutput # type: ignore
from ._models import StructuredInterfaceParameter # type: ignore
from ._models import StudioMigrationInfo # type: ignore
from ._models import SubGraphConcatenateAssignment # type: ignore
from ._models import SubGraphConfiguration # type: ignore
from ._models import SubGraphConnectionInfo # type: ignore
from ._models import SubGraphDataPathParameterAssignment # type: ignore
from ._models import SubGraphInfo # type: ignore
from ._models import SubGraphParameterAssignment # type: ignore
from ._models import SubGraphPortInfo # type: ignore
from ._models import SubPipelineDefinition # type: ignore
from ._models import SubPipelineParameterAssignment # type: ignore
from ._models import SubPipelinesInfo # type: ignore
from ._models import SubStatusPeriod # type: ignore
from ._models import SubmitBulkRunRequest # type: ignore
from ._models import SubmitBulkRunResponse # type: ignore
from ._models import SubmitFlowRequest # type: ignore
from ._models import SubmitPipelineRunRequest # type: ignore
from ._models import SweepEarlyTerminationPolicy # type: ignore
from ._models import SweepSettings # type: ignore
from ._models import SweepSettingsLimits # type: ignore
from ._models import SystemData # type: ignore
from ._models import SystemMeta # type: ignore
from ._models import SystemMetaExtraHashes # type: ignore
from ._models import SystemMetaIdentifierHashes # type: ignore
from ._models import TargetLags # type: ignore
from ._models import TargetRollingWindowSize # type: ignore
from ._models import TargetSelectorConfiguration # type: ignore
from ._models import Task # type: ignore
from ._models import TaskControlFlowInfo # type: ignore
from ._models import TaskReuseInfo # type: ignore
from ._models import TensorflowConfiguration # type: ignore
from ._models import TestDataSettings # type: ignore
from ._models import Tool # type: ignore
from ._models import ToolFuncResponse # type: ignore
from ._models import ToolInputDynamicList # type: ignore
from ._models import ToolInputGeneratedBy # type: ignore
from ._models import ToolMetaDto # type: ignore
from ._models import ToolSetting # type: ignore
from ._models import ToolSourceMeta # type: ignore
from ._models import TorchDistributedConfiguration # type: ignore
from ._models import TrainingDiagnosticConfiguration # type: ignore
from ._models import TrainingOutput # type: ignore
from ._models import TrainingSettings # type: ignore
from ._models import TriggerAsyncOperationStatus # type: ignore
from ._models import TuningNodeRunSetting # type: ignore
from ._models import TuningNodeSetting # type: ignore
from ._models import TypedAssetReference # type: ignore
from ._models import UIAzureOpenAIDeploymentNameSelector # type: ignore
from ._models import UIAzureOpenAIModelCapabilities # type: ignore
from ._models import UIColumnPicker # type: ignore
from ._models import UIComputeSelection # type: ignore
from ._models import UIHyperparameterConfiguration # type: ignore
from ._models import UIInputSetting # type: ignore
from ._models import UIJsonEditor # type: ignore
from ._models import UIParameterHint # type: ignore
from ._models import UIPromptFlowConnectionSelector # type: ignore
from ._models import UIWidgetMetaInfo # type: ignore
from ._models import UIYamlEditor # type: ignore
from ._models import UnversionedEntityRequestDto # type: ignore
from ._models import UnversionedEntityResponseDto # type: ignore
from ._models import UnversionedRebuildIndexDto # type: ignore
from ._models import UnversionedRebuildResponseDto # type: ignore
from ._models import UpdateComponentRequest # type: ignore
from ._models import UpdateFlowRequest # type: ignore
from ._models import UpdateFlowRuntimeRequest # type: ignore
from ._models import UpdateFlowStatusRequest # type: ignore
from ._models import UpdateRegistryComponentRequest # type: ignore
from ._models import UploadOptions # type: ignore
from ._models import UriReference # type: ignore
from ._models import User # type: ignore
from ._models import UserAssignedIdentity # type: ignore
from ._models import ValidationDataSettings # type: ignore
from ._models import VariantIdentifier # type: ignore
from ._models import VariantNode # type: ignore
from ._models import Volume # type: ignore
from ._models import WebServiceComputeMetaInfo # type: ignore
from ._models import WebServicePort # type: ignore
from ._models import Webhook # type: ignore
from ._models import WorkspaceConnectionSpec # type: ignore
from ._azure_machine_learning_designer_service_client_enums import (
AEVAAssetType,
AEVADataStoreMode,
AEVAIdentityType,
ActionType,
AetherArgumentValueType,
AetherAssetType,
AetherBuildSourceType,
AetherComputeType,
AetherControlFlowType,
AetherControlInputValue,
AetherDataCopyMode,
AetherDataLocationStorageType,
AetherDataReferenceType,
AetherDataStoreMode,
AetherDataTransferStorageType,
AetherDataTransferTaskType,
AetherDatasetType,
AetherEarlyTerminationPolicyType,
AetherEntityStatus,
AetherExecutionEnvironment,
AetherExecutionPhase,
AetherFeaturizationMode,
AetherFileBasedPathType,
AetherForecastHorizonMode,
AetherIdentityType,
AetherLogVerbosity,
AetherModuleDeploymentSource,
AetherModuleHashVersion,
AetherModuleType,
AetherNCrossValidationMode,
AetherParameterType,
AetherParameterValueType,
AetherPrimaryMetrics,
AetherRepositoryType,
AetherResourceOperator,
AetherResourceValueType,
AetherSamplingAlgorithmType,
AetherSeasonalityMode,
AetherShortSeriesHandlingConfiguration,
AetherStackMetaLearnerType,
AetherStoredProcedureParameterType,
AetherTabularTrainingMode,
AetherTargetAggregationFunction,
AetherTargetLagsMode,
AetherTargetRollingWindowSizeMode,
AetherTaskType,
AetherTrainingOutputType,
AetherUIScriptLanguageEnum,
AetherUIWidgetTypeEnum,
AetherUploadState,
AetherUseStl,
ApplicationEndpointType,
ArgumentValueType,
AssetScopeTypes,
AssetSourceType,
AssetType,
AutoDeleteCondition,
BuildContextLocationType,
Communicator,
ComponentRegistrationTypeEnum,
ComponentType,
ComputeEnvironmentType,
ComputeTargetType,
ComputeType,
ConfigValueType,
ConnectionCategory,
ConnectionScope,
ConnectionSourceType,
ConnectionType,
ConsumeMode,
ControlFlowType,
ControlInputValue,
DataBindingMode,
DataCategory,
DataCopyMode,
DataLocationStorageType,
DataPortType,
DataReferenceType,
DataSourceType,
DataStoreMode,
DataTransferStorageType,
DataTransferTaskType,
DataTypeMechanism,
DatasetAccessModes,
DatasetConsumptionType,
DatasetDeliveryMechanism,
DatasetOutputType,
DatasetType,
DeliveryMechanism,
DistributionParameterEnum,
DistributionType,
EarlyTerminationPolicyType,
EmailNotificationEnableType,
EndpointAuthMode,
EntityKind,
EntityStatus,
ErrorHandlingMode,
ExecutionPhase,
FeaturizationMode,
FlowFeatureStateEnum,
FlowLanguage,
FlowPatchOperationType,
FlowRunMode,
FlowRunStatusEnum,
FlowRunTypeEnum,
FlowTestMode,
FlowType,
ForecastHorizonMode,
Framework,
Frequency,
GlobalJobDispatcherSupportedComputeType,
GraphComponentsMode,
GraphDatasetsLoadModes,
GraphSdkCodeType,
HttpStatusCode,
IdentityType,
InputType,
IntellectualPropertyAccessMode,
JobInputType,
JobLimitsType,
JobOutputType,
JobProvisioningState,
JobStatus,
JobType,
KeyType,
ListViewType,
LogLevel,
LogVerbosity,
LongRunningUpdateType,
MLFlowAutologgerState,
ManagedServiceIdentityType,
MetricValueType,
MfeInternalIdentityType,
MfeInternalMLFlowAutologgerState,
MfeInternalScheduleStatus,
ModuleDtoFields,
ModuleInfoFromYamlStatusEnum,
ModuleRunSettingTypes,
ModuleScope,
ModuleSourceType,
ModuleType,
ModuleUpdateOperationType,
ModuleWorkingMechanism,
NCrossValidationMode,
NodeCompositionMode,
NodesValueType,
Orientation,
OutputMechanism,
ParameterType,
ParameterValueType,
PipelineDraftMode,
PipelineRunStatusCode,
PipelineStatusCode,
PipelineType,
PortAction,
PrimaryMetrics,
PromptflowEngineType,
ProvisioningState,
RealTimeEndpointInternalStepCode,
RealTimeEndpointOpCode,
RealTimeEndpointOpStatusCode,
RecurrenceFrequency,
RunDisplayNameGenerationType,
RunSettingParameterType,
RunSettingUIWidgetTypeEnum,
RunStatus,
RunType,
RuntimeStatusEnum,
RuntimeType,
SamplingAlgorithmType,
ScheduleProvisioningStatus,
ScheduleStatus,
ScheduleType,
ScopeType,
ScriptType,
SeasonalityMode,
Section,
SessionConfigModeEnum,
SessionSetupModeEnum,
SetupFlowSessionAction,
SeverityLevel,
ShortSeriesHandlingConfiguration,
StackMetaLearnerType,
StorageAuthType,
StoredProcedureParameterType,
SuccessfulCommandReturnCode,
TabularTrainingMode,
TargetAggregationFunction,
TargetLagsMode,
TargetRollingWindowSizeMode,
TaskCreationOptions,
TaskStatus,
TaskStatusCode,
TaskType,
ToolFuncCallScenario,
ToolState,
ToolType,
TrainingOutputType,
TriggerOperationType,
TriggerType,
UIInputDataDeliveryMode,
UIScriptLanguageEnum,
UIWidgetTypeEnum,
UploadState,
UseStl,
UserType,
ValidationStatus,
ValueType,
VmPriority,
WebServiceState,
WeekDays,
Weekday,
YarnDeployMode,
)
__all__ = [
'ACIAdvanceSettings',
'AEVAComputeConfiguration',
'AEVAResourceConfiguration',
'AISuperComputerConfiguration',
'AISuperComputerScalePolicy',
'AISuperComputerStorageReferenceConfiguration',
'AKSAdvanceSettings',
'AKSReplicaStatus',
'AMLComputeConfiguration',
'APCloudConfiguration',
'Activate',
'AdditionalErrorInfo',
'AdhocTriggerScheduledCommandJobRequest',
'AdhocTriggerScheduledSparkJobRequest',
'AetherAPCloudConfiguration',
'AetherAmlDataset',
'AetherAmlSparkCloudSetting',
'AetherArgumentAssignment',
'AetherAssetDefinition',
'AetherAssetOutputSettings',
'AetherAutoFeaturizeConfiguration',
'AetherAutoMLComponentConfiguration',
'AetherAutoTrainConfiguration',
'AetherAzureBlobReference',
'AetherAzureDataLakeGen2Reference',
'AetherAzureDataLakeReference',
'AetherAzureDatabaseReference',
'AetherAzureFilesReference',
'AetherBatchAiComputeInfo',
'AetherBuildArtifactInfo',
'AetherCloudBuildDropPathInfo',
'AetherCloudBuildInfo',
'AetherCloudBuildQueueInfo',
'AetherCloudPrioritySetting',
'AetherCloudSettings',
'AetherColumnTransformer',
'AetherComputeConfiguration',
'AetherComputeSetting',
'AetherControlInput',
'AetherControlOutput',
'AetherCopyDataTask',
'AetherCosmosReference',
'AetherCreatedBy',
'AetherCustomReference',
'AetherDBFSReference',
'AetherDataLocation',
'AetherDataLocationReuseCalculationFields',
'AetherDataPath',
'AetherDataReference',
'AetherDataSetDefinition',
'AetherDataSetDefinitionValue',
'AetherDataSettings',
'AetherDataTransferCloudConfiguration',
'AetherDataTransferSink',
'AetherDataTransferSource',
'AetherDataTransferV2CloudSetting',
'AetherDatabaseSink',
'AetherDatabaseSource',
'AetherDatabricksComputeInfo',
'AetherDatasetOutput',
'AetherDatasetOutputOptions',
'AetherDatasetRegistration',
'AetherDatastoreSetting',
'AetherDoWhileControlFlowInfo',
'AetherDoWhileControlFlowRunSettings',
'AetherDockerSettingConfiguration',
'AetherEntityInterfaceDocumentation',
'AetherEntrySetting',
'AetherEnvironmentConfiguration',
'AetherEsCloudConfiguration',
'AetherExportDataTask',
'AetherFeaturizationSettings',
'AetherFileSystem',
'AetherForecastHorizon',
'AetherForecastingSettings',
'AetherGeneralSettings',
'AetherGlobsOptions',
'AetherGraphControlNode',
'AetherGraphControlReferenceNode',
'AetherGraphDatasetNode',
'AetherGraphEdge',
'AetherGraphEntity',
'AetherGraphModuleNode',
'AetherGraphReferenceNode',
'AetherHdfsReference',
'AetherHdiClusterComputeInfo',
'AetherHdiRunConfiguration',
'AetherHyperDriveConfiguration',
'AetherIdentitySetting',
'AetherImportDataTask',
'AetherInputSetting',
'AetherInteractiveConfig',
'AetherK8SConfiguration',
'AetherLegacyDataPath',
'AetherLimitSettings',
'AetherMlcComputeInfo',
'AetherModuleEntity',
'AetherModuleExtendedProperties',
'AetherNCrossValidations',
'AetherOutputSetting',
'AetherParallelForControlFlowInfo',
'AetherParameterAssignment',
'AetherPhillyHdfsReference',
'AetherPortInfo',
'AetherPriorityConfig',
'AetherPriorityConfiguration',
'AetherRegisteredDataSetReference',
'AetherRemoteDockerComputeInfo',
'AetherResourceAssignment',
'AetherResourceAttributeAssignment',
'AetherResourceAttributeDefinition',
'AetherResourceConfig',
'AetherResourceConfiguration',
'AetherResourceModel',
'AetherResourcesSetting',
'AetherSavedDataSetReference',
'AetherScopeCloudConfiguration',
'AetherSeasonality',
'AetherSqlDataPath',
'AetherStackEnsembleSettings',
'AetherStoredProcedureParameter',
'AetherStructuredInterface',
'AetherStructuredInterfaceInput',
'AetherStructuredInterfaceOutput',
'AetherStructuredInterfaceParameter',
'AetherSubGraphConfiguration',
'AetherSweepEarlyTerminationPolicy',
'AetherSweepSettings',
'AetherSweepSettingsLimits',
'AetherTargetLags',
'AetherTargetRollingWindowSize',
'AetherTargetSelectorConfiguration',
'AetherTestDataSettings',
'AetherTorchDistributedConfiguration',
'AetherTrainingOutput',
'AetherTrainingSettings',
'AetherUIAzureOpenAIDeploymentNameSelector',
'AetherUIAzureOpenAIModelCapabilities',
'AetherUIColumnPicker',
'AetherUIJsonEditor',
'AetherUIParameterHint',
'AetherUIPromptFlowConnectionSelector',
'AetherValidationDataSettings',
'AetherVsoBuildArtifactInfo',
'AetherVsoBuildDefinitionInfo',
'AetherVsoBuildInfo',
'AmlDataset',
'AmlK8SConfiguration',
'AmlK8SPriorityConfiguration',
'AmlSparkCloudSetting',
'ApiAndParameters',
'ApplicationEndpointConfiguration',
'ArgumentAssignment',
'Asset',
'AssetDefinition',
'AssetNameAndVersionIdentifier',
'AssetOutputSettings',
'AssetOutputSettingsParameter',
'AssetPublishResult',
'AssetPublishSingleRegionResult',
'AssetTypeMetaInfo',
'AssetVersionPublishRequest',
'AssignedUser',
'AttachCosmosRequest',
'AuthKeys',
'AutoClusterComputeSpecification',
'AutoDeleteSetting',
'AutoFeaturizeConfiguration',
'AutoMLComponentConfiguration',
'AutoScaler',
'AutoTrainConfiguration',
'AutologgerSettings',
'AvailabilityResponse',
'AzureBlobReference',
'AzureDataLakeGen2Reference',
'AzureDataLakeReference',
'AzureDatabaseReference',
'AzureFilesReference',
'AzureMLModuleVersionDescriptor',
'AzureOpenAIDeploymentDto',
'AzureOpenAIModelCapabilities',
'BatchAiComputeInfo',
'BatchDataInput',
'BatchExportComponentSpecResponse',
'BatchExportRawComponentResponse',
'BatchGetComponentHashesRequest',
'BatchGetComponentRequest',
'Binding',
'BulkTestDto',
'CloudError',
'CloudPrioritySetting',
'CloudSettings',
'ColumnTransformer',
'CommandJob',
'CommandJobLimits',
'CommandReturnCodeConfig',
'ComponentConfiguration',
'ComponentInput',
'ComponentJob',
'ComponentJobInput',
'ComponentJobOutput',
'ComponentNameAndDefaultVersion',
'ComponentNameMetaInfo',
'ComponentOutput',
'ComponentPreflightResult',
'ComponentSpecMetaInfo',
'ComponentUpdateRequest',
'ComponentValidationRequest',
'ComponentValidationResponse',
'Compute',
'ComputeConfiguration',
'ComputeContract',
'ComputeIdentityContract',
'ComputeIdentityDto',
'ComputeInfo',
'ComputeProperties',
'ComputeRPUserAssignedIdentity',
'ComputeRequest',
'ComputeSetting',
'ComputeStatus',
'ComputeStatusDetail',
'ComputeWarning',
'ConnectionConfigSpec',
'ConnectionDto',
'ConnectionEntity',
'ConnectionOverrideSetting',
'ConnectionSpec',
'ContainerInstanceConfiguration',
'ContainerRegistry',
'ContainerResourceRequirements',
'ControlInput',
'ControlOutput',
'CopyDataTask',
'CreateFlowRequest',
'CreateFlowRuntimeRequest',
'CreateFlowSessionRequest',
'CreateInferencePipelineRequest',
'CreateOrUpdateConnectionRequest',
'CreateOrUpdateConnectionRequestDto',
'CreatePipelineDraftRequest',
'CreatePipelineJobScheduleDto',
'CreatePublishedPipelineRequest',
'CreateRealTimeEndpointRequest',
'CreatedBy',
'CreatedFromDto',
'CreationContext',
'Cron',
'CustomConnectionConfig',
'CustomReference',
'DBFSReference',
'Data',
'DataInfo',
'DataLocation',
'DataPath',
'DataPathParameter',
'DataPortDto',
'DataReference',
'DataReferenceConfiguration',
'DataSetDefinition',
'DataSetDefinitionValue',
'DataSetPathParameter',
'DataSettings',
'DataTransferCloudConfiguration',
'DataTransferSink',
'DataTransferSource',
'DataTransferV2CloudSetting',
'DataTypeCreationInfo',
'DatabaseSink',
'DatabaseSource',
'DatabricksComputeInfo',
'DatabricksConfiguration',
'DatacacheConfiguration',
'DatasetIdentifier',
'DatasetInputDetails',
'DatasetLineage',
'DatasetOutput',
'DatasetOutputDetails',
'DatasetOutputOptions',
'DatasetRegistration',
'DatasetRegistrationOptions',
'DatastoreSetting',
'DbfsStorageInfoDto',
'DebugInfoResponse',
'DeployFlowRequest',
'DeploymentInfo',
'DistributionConfiguration',
'DistributionParameter',
'DoWhileControlFlowInfo',
'DoWhileControlFlowRunSettings',
'DockerBuildContext',
'DockerConfiguration',
'DockerImagePlatform',
'DockerSection',
'DockerSettingConfiguration',
'DownloadResourceInfo',
'EPRPipelineRunErrorClassificationRequest',
'EndpointSetting',
'EntityInterface',
'EntrySetting',
'EnumParameterRule',
'EnvironmentConfiguration',
'EnvironmentDefinition',
'EnvironmentDefinitionDto',
'ErrorAdditionalInfo',
'ErrorResponse',
'EsCloudConfiguration',
'EvaluationFlowRunSettings',
'ExampleRequest',
'ExecutionContextDto',
'ExecutionDataLocation',
'ExecutionDataPath',
'ExecutionGlobsOptions',
'ExperimentComputeMetaInfo',
'ExperimentInfo',
'ExportComponentMetaInfo',
'ExportDataTask',
'FeaturizationSettings',
'FeedDto',
'FeedDtoSupportedAssetTypes',
'FileSystem',
'Flow',
'FlowAnnotations',
'FlowBaseDto',
'FlowDto',
'FlowEnvironment',
'FlowFeature',
'FlowFeatureState',
'FlowGraph',
'FlowGraphAnnotationNode',
'FlowGraphLayout',
'FlowGraphReference',
'FlowIndexEntity',
'FlowInputDefinition',
'FlowNode',
'FlowNodeLayout',
'FlowNodeVariant',
'FlowOutputDefinition',
'FlowProperties',
'FlowRunBasePath',
'FlowRunInfo',
'FlowRunResult',
'FlowRunSettings',
'FlowRunSettingsBase',
'FlowRunStatusResponse',
'FlowRuntimeCapability',
'FlowRuntimeDto',
'FlowSampleDto',
'FlowSessionDto',
'FlowSnapshot',
'FlowSubmitRunSettings',
'FlowTestInfo',
'FlowTestStorageSetting',
'FlowToolSettingParameter',
'FlowToolsDto',
'FlowVariantNode',
'ForecastHorizon',
'ForecastingSettings',
'GeneralSettings',
'GeneratePipelineComponentRequest',
'GenerateToolMetaRequest',
'GetDynamicListRequest',
'GetRunDataResultDto',
'GetTrainingSessionDto',
'GlobalJobDispatcherConfiguration',
'GlobsOptions',
'GraphAnnotationNode',
'GraphControlNode',
'GraphControlReferenceNode',
'GraphDatasetNode',
'GraphDraftEntity',
'GraphEdge',
'GraphLayout',
'GraphLayoutCreationInfo',
'GraphModuleNode',
'GraphModuleNodeRunSetting',
'GraphModuleNodeUIInputSetting',
'GraphNodeStatusInfo',
'GraphReferenceNode',
'HdfsReference',
'HdiClusterComputeInfo',
'HdiConfiguration',
'HdiRunConfiguration',
'HistoryConfiguration',
'HyperDriveConfiguration',
'ICheckableLongRunningOperationResponse',
'IdentityConfiguration',
'IdentitySetting',
'ImportDataTask',
'IndexedErrorResponse',
'InitScriptInfoDto',
'InnerErrorDetails',
'InnerErrorResponse',
'InputAsset',
'InputData',
'InputDataBinding',
'InputDefinition',
'InputOutputPortMetadata',
'InputSetting',
'IntellectualPropertyPublisherInformation',
'InteractiveConfig',
'InteractiveConfiguration',
'JobCost',
'JobEndpoint',
'JobInput',
'JobOutput',
'JobOutputArtifacts',
'JobScheduleDto',
'K8SConfiguration',
'KeyValuePairComponentNameMetaInfoErrorResponse',
'KeyValuePairComponentNameMetaInfoModuleDto',
'KeyValuePairStringObject',
'KubernetesConfiguration',
'Kwarg',
'LegacyDataPath',
'LimitSettings',
'LinkedADBWorkspaceMetadata',
'LinkedPipelineInfo',
'LoadFlowAsComponentRequest',
'LogRunTerminatedEventDto',
'LongRunningOperationUriResponse',
'LongRunningUpdateRegistryComponentRequest',
'ManagedServiceIdentity',
'MavenLibraryDto',
'MetricProperties',
'MetricSchemaDto',
'MetricSchemaPropertyDto',
'MetricV2Dto',
'MetricV2Value',
'MfeInternalAutologgerSettings',
'MfeInternalIdentityConfiguration',
'MfeInternalNodes',
'MfeInternalOutputData',
'MfeInternalSecretConfiguration',
'MfeInternalUriReference',
'MfeInternalV20211001ComponentJob',
'MinMaxParameterRule',
'MlcComputeInfo',
'ModelDto',
'ModelManagementErrorResponse',
'ModifyPipelineJobScheduleDto',
'ModuleDto',
'ModuleDtoWithErrors',
'ModuleDtoWithValidateStatus',
'ModuleEntity',
'ModulePythonInterface',
'MpiConfiguration',
'NCrossValidations',
'Node',
'NodeInputPort',
'NodeLayout',
'NodeOutputPort',
'NodePortInterface',
'NodeSource',
'NodeTelemetryMetaInfo',
'NodeVariant',
'Nodes',
'NoteBookTaskDto',
'NotificationSetting',
'ODataError',
'ODataErrorDetail',
'ODataErrorResponse',
'ODataInnerError',
'OutputData',
'OutputDataBinding',
'OutputDatasetLineage',
'OutputDefinition',
'OutputOptions',
'OutputSetting',
'OutputSettingSpec',
'PaginatedDataInfoList',
'PaginatedModelDtoList',
'PaginatedModuleDtoList',
'PaginatedPipelineDraftSummaryList',
'PaginatedPipelineEndpointSummaryList',
'PaginatedPipelineRunSummaryList',
'PaginatedPublishedPipelineSummaryList',
'ParallelForControlFlowInfo',
'ParallelTaskConfiguration',
'Parameter',
'ParameterAssignment',
'ParameterDefinition',
'PatchFlowRequest',
'Pipeline',
'PipelineDraft',
'PipelineDraftStepDetails',
'PipelineDraftSummary',
'PipelineEndpoint',
'PipelineEndpointSummary',
'PipelineGraph',
'PipelineInput',
'PipelineJob',
'PipelineJobRuntimeBasicSettings',
'PipelineJobScheduleDto',
'PipelineOutput',
'PipelineRun',
'PipelineRunGraphDetail',
'PipelineRunGraphStatus',
'PipelineRunProfile',
'PipelineRunStatus',
'PipelineRunStepDetails',
'PipelineRunSummary',
'PipelineStatus',
'PipelineStepRun',
'PipelineStepRunOutputs',
'PipelineSubDraft',
'PolicyValidationResponse',
'PortInfo',
'PortOutputInfo',
'PriorityConfig',
'PriorityConfiguration',
'PromoteDataSetRequest',
'ProviderEntity',
'PublishedPipeline',
'PublishedPipelineSummary',
'PyTorchConfiguration',
'PythonInterfaceMapping',
'PythonPyPiOrRCranLibraryDto',
'PythonSection',
'QueueingInfo',
'RCranPackage',
'RGitHubPackage',
'RSection',
'RawComponentDto',
'RayConfiguration',
'RealTimeEndpoint',
'RealTimeEndpointInfo',
'RealTimeEndpointStatus',
'RealTimeEndpointSummary',
'RealTimeEndpointTestRequest',
'Recurrence',
'RecurrencePattern',
'RecurrenceSchedule',
'RegenerateServiceKeysRequest',
'RegisterComponentMetaInfo',
'RegisterComponentMetaInfoExtraHashes',
'RegisterComponentMetaInfoIdentifierHashes',
'RegisterRegistryComponentMetaInfo',
'RegisterRegistryComponentMetaInfoExtraHashes',
'RegisterRegistryComponentMetaInfoIdentifierHashes',
'RegisteredDataSetReference',
'RegistrationOptions',
'RegistryBlobReferenceData',
'RegistryIdentity',
'Relationship',
'RemoteDockerComputeInfo',
'ResourceConfig',
'ResourceConfiguration',
'ResourcesSetting',
'RetrieveToolFuncResultRequest',
'RetryConfiguration',
'RootError',
'RunAnnotations',
'RunCommandsCommandResult',
'RunConfiguration',
'RunDatasetReference',
'RunDefinition',
'RunDetailsDto',
'RunDetailsWarningDto',
'RunDto',
'RunIndexEntity',
'RunIndexMetricSummary',
'RunIndexMetricSummarySystemObject',
'RunIndexResourceMetricSummary',
'RunMetricDto',
'RunMetricsTypesDto',
'RunProperties',
'RunSettingParameter',
'RunSettingParameterAssignment',
'RunSettingUIParameterHint',
'RunStatusPeriod',
'RunTypeV2',
'RunTypeV2Index',
'RuntimeConfiguration',
'SampleMeta',
'SavePipelineDraftRequest',
'SavedDataSetReference',
'ScheduleBase',
'SchemaContractsCreatedBy',
'ScopeCloudConfiguration',
'Seasonality',
'SecretConfiguration',
'SegmentedResult1',
'ServiceLogRequest',
'SessionApplication',
'SessionApplicationRunCommandResult',
'SessionProperties',
'SetupFlowSessionRequest',
'SharingScope',
'Snapshot',
'SnapshotInfo',
'SourceCodeDataReference',
'SparkConfiguration',
'SparkJarTaskDto',
'SparkJob',
'SparkJobEntry',
'SparkMavenPackage',
'SparkPythonTaskDto',
'SparkResourceConfiguration',
'SparkSection',
'SparkSubmitTaskDto',
'SqlDataPath',
'StackEnsembleSettings',
'StandbyPoolProperties',
'StandbyPoolResourceStatus',
'StartRunResult',
'StepRunProfile',
'StorageInfo',
'StoredProcedureParameter',
'Stream',
'StructuredInterface',
'StructuredInterfaceInput',
'StructuredInterfaceOutput',
'StructuredInterfaceParameter',
'StudioMigrationInfo',
'SubGraphConcatenateAssignment',
'SubGraphConfiguration',
'SubGraphConnectionInfo',
'SubGraphDataPathParameterAssignment',
'SubGraphInfo',
'SubGraphParameterAssignment',
'SubGraphPortInfo',
'SubPipelineDefinition',
'SubPipelineParameterAssignment',
'SubPipelinesInfo',
'SubStatusPeriod',
'SubmitBulkRunRequest',
'SubmitBulkRunResponse',
'SubmitFlowRequest',
'SubmitPipelineRunRequest',
'SweepEarlyTerminationPolicy',
'SweepSettings',
'SweepSettingsLimits',
'SystemData',
'SystemMeta',
'SystemMetaExtraHashes',
'SystemMetaIdentifierHashes',
'TargetLags',
'TargetRollingWindowSize',
'TargetSelectorConfiguration',
'Task',
'TaskControlFlowInfo',
'TaskReuseInfo',
'TensorflowConfiguration',
'TestDataSettings',
'Tool',
'ToolFuncResponse',
'ToolInputDynamicList',
'ToolInputGeneratedBy',
'ToolMetaDto',
'ToolSetting',
'ToolSourceMeta',
'TorchDistributedConfiguration',
'TrainingDiagnosticConfiguration',
'TrainingOutput',
'TrainingSettings',
'TriggerAsyncOperationStatus',
'TuningNodeRunSetting',
'TuningNodeSetting',
'TypedAssetReference',
'UIAzureOpenAIDeploymentNameSelector',
'UIAzureOpenAIModelCapabilities',
'UIColumnPicker',
'UIComputeSelection',
'UIHyperparameterConfiguration',
'UIInputSetting',
'UIJsonEditor',
'UIParameterHint',
'UIPromptFlowConnectionSelector',
'UIWidgetMetaInfo',
'UIYamlEditor',
'UnversionedEntityRequestDto',
'UnversionedEntityResponseDto',
'UnversionedRebuildIndexDto',
'UnversionedRebuildResponseDto',
'UpdateComponentRequest',
'UpdateFlowRequest',
'UpdateFlowRuntimeRequest',
'UpdateFlowStatusRequest',
'UpdateRegistryComponentRequest',
'UploadOptions',
'UriReference',
'User',
'UserAssignedIdentity',
'ValidationDataSettings',
'VariantIdentifier',
'VariantNode',
'Volume',
'WebServiceComputeMetaInfo',
'WebServicePort',
'Webhook',
'WorkspaceConnectionSpec',
'AEVAAssetType',
'AEVADataStoreMode',
'AEVAIdentityType',
'ActionType',
'AetherArgumentValueType',
'AetherAssetType',
'AetherBuildSourceType',
'AetherComputeType',
'AetherControlFlowType',
'AetherControlInputValue',
'AetherDataCopyMode',
'AetherDataLocationStorageType',
'AetherDataReferenceType',
'AetherDataStoreMode',
'AetherDataTransferStorageType',
'AetherDataTransferTaskType',
'AetherDatasetType',
'AetherEarlyTerminationPolicyType',
'AetherEntityStatus',
'AetherExecutionEnvironment',
'AetherExecutionPhase',
'AetherFeaturizationMode',
'AetherFileBasedPathType',
'AetherForecastHorizonMode',
'AetherIdentityType',
'AetherLogVerbosity',
'AetherModuleDeploymentSource',
'AetherModuleHashVersion',
'AetherModuleType',
'AetherNCrossValidationMode',
'AetherParameterType',
'AetherParameterValueType',
'AetherPrimaryMetrics',
'AetherRepositoryType',
'AetherResourceOperator',
'AetherResourceValueType',
'AetherSamplingAlgorithmType',
'AetherSeasonalityMode',
'AetherShortSeriesHandlingConfiguration',
'AetherStackMetaLearnerType',
'AetherStoredProcedureParameterType',
'AetherTabularTrainingMode',
'AetherTargetAggregationFunction',
'AetherTargetLagsMode',
'AetherTargetRollingWindowSizeMode',
'AetherTaskType',
'AetherTrainingOutputType',
'AetherUIScriptLanguageEnum',
'AetherUIWidgetTypeEnum',
'AetherUploadState',
'AetherUseStl',
'ApplicationEndpointType',
'ArgumentValueType',
'AssetScopeTypes',
'AssetSourceType',
'AssetType',
'AutoDeleteCondition',
'BuildContextLocationType',
'Communicator',
'ComponentRegistrationTypeEnum',
'ComponentType',
'ComputeEnvironmentType',
'ComputeTargetType',
'ComputeType',
'ConfigValueType',
'ConnectionCategory',
'ConnectionScope',
'ConnectionSourceType',
'ConnectionType',
'ConsumeMode',
'ControlFlowType',
'ControlInputValue',
'DataBindingMode',
'DataCategory',
'DataCopyMode',
'DataLocationStorageType',
'DataPortType',
'DataReferenceType',
'DataSourceType',
'DataStoreMode',
'DataTransferStorageType',
'DataTransferTaskType',
'DataTypeMechanism',
'DatasetAccessModes',
'DatasetConsumptionType',
'DatasetDeliveryMechanism',
'DatasetOutputType',
'DatasetType',
'DeliveryMechanism',
'DistributionParameterEnum',
'DistributionType',
'EarlyTerminationPolicyType',
'EmailNotificationEnableType',
'EndpointAuthMode',
'EntityKind',
'EntityStatus',
'ErrorHandlingMode',
'ExecutionPhase',
'FeaturizationMode',
'FlowFeatureStateEnum',
'FlowLanguage',
'FlowPatchOperationType',
'FlowRunMode',
'FlowRunStatusEnum',
'FlowRunTypeEnum',
'FlowTestMode',
'FlowType',
'ForecastHorizonMode',
'Framework',
'Frequency',
'GlobalJobDispatcherSupportedComputeType',
'GraphComponentsMode',
'GraphDatasetsLoadModes',
'GraphSdkCodeType',
'HttpStatusCode',
'IdentityType',
'InputType',
'IntellectualPropertyAccessMode',
'JobInputType',
'JobLimitsType',
'JobOutputType',
'JobProvisioningState',
'JobStatus',
'JobType',
'KeyType',
'ListViewType',
'LogLevel',
'LogVerbosity',
'LongRunningUpdateType',
'MLFlowAutologgerState',
'ManagedServiceIdentityType',
'MetricValueType',
'MfeInternalIdentityType',
'MfeInternalMLFlowAutologgerState',
'MfeInternalScheduleStatus',
'ModuleDtoFields',
'ModuleInfoFromYamlStatusEnum',
'ModuleRunSettingTypes',
'ModuleScope',
'ModuleSourceType',
'ModuleType',
'ModuleUpdateOperationType',
'ModuleWorkingMechanism',
'NCrossValidationMode',
'NodeCompositionMode',
'NodesValueType',
'Orientation',
'OutputMechanism',
'ParameterType',
'ParameterValueType',
'PipelineDraftMode',
'PipelineRunStatusCode',
'PipelineStatusCode',
'PipelineType',
'PortAction',
'PrimaryMetrics',
'PromptflowEngineType',
'ProvisioningState',
'RealTimeEndpointInternalStepCode',
'RealTimeEndpointOpCode',
'RealTimeEndpointOpStatusCode',
'RecurrenceFrequency',
'RunDisplayNameGenerationType',
'RunSettingParameterType',
'RunSettingUIWidgetTypeEnum',
'RunStatus',
'RunType',
'RuntimeStatusEnum',
'RuntimeType',
'SamplingAlgorithmType',
'ScheduleProvisioningStatus',
'ScheduleStatus',
'ScheduleType',
'ScopeType',
'ScriptType',
'SeasonalityMode',
'Section',
'SessionConfigModeEnum',
'SessionSetupModeEnum',
'SetupFlowSessionAction',
'SeverityLevel',
'ShortSeriesHandlingConfiguration',
'StackMetaLearnerType',
'StorageAuthType',
'StoredProcedureParameterType',
'SuccessfulCommandReturnCode',
'TabularTrainingMode',
'TargetAggregationFunction',
'TargetLagsMode',
'TargetRollingWindowSizeMode',
'TaskCreationOptions',
'TaskStatus',
'TaskStatusCode',
'TaskType',
'ToolFuncCallScenario',
'ToolState',
'ToolType',
'TrainingOutputType',
'TriggerOperationType',
'TriggerType',
'UIInputDataDeliveryMode',
'UIScriptLanguageEnum',
'UIWidgetTypeEnum',
'UploadState',
'UseStl',
'UserType',
'ValidationStatus',
'ValueType',
'VmPriority',
'WebServiceState',
'WeekDays',
'Weekday',
'YarnDeployMode',
]
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/models/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/models/__init__.py",
"repo_id": "promptflow",
"token_count": 33794
} | 19 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_attach_cosmos_account_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
overwrite = kwargs.pop('overwrite', False) # type: Optional[bool]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/TraceSessions/attachDb')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if overwrite is not None:
query_parameters['overwrite'] = _SERIALIZER.query("overwrite", overwrite, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_cosmos_resource_token_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
container_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
acquire_write = kwargs.pop('acquire_write', False) # type: Optional[bool]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/TraceSessions/container/{containerName}/resourceToken')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"containerName": _SERIALIZER.url("container_name", container_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if acquire_write is not None:
query_parameters['acquireWrite'] = _SERIALIZER.query("acquire_write", acquire_write, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class TraceSessionsOperations(object):
"""TraceSessionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def attach_cosmos_account(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
overwrite=False, # type: Optional[bool]
body=None, # type: Optional["_models.AttachCosmosRequest"]
**kwargs # type: Any
):
# type: (...) -> Any
"""attach_cosmos_account.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param overwrite:
:type overwrite: bool
:param body:
:type body: ~flow.models.AttachCosmosRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'AttachCosmosRequest')
else:
_json = None
request = build_attach_cosmos_account_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
overwrite=overwrite,
template_url=self.attach_cosmos_account.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
attach_cosmos_account.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/TraceSessions/attachDb'} # type: ignore
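# Usage sketch (kept as a comment because this module is auto-generated and edits are
# lost on regeneration). It assumes a configured service client that exposes this
# operation group as a `trace_sessions` attribute; that attribute name and the request
# body fields are illustrative assumptions, not defined in this file.
#
#     body = _models.AttachCosmosRequest()  # populate fields per the service contract
#     result = service_client.trace_sessions.attach_cosmos_account(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace-name>",
#         overwrite=False,
#         body=body,
#     )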
@distributed_trace
def get_cosmos_resource_token(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
container_name, # type: str
acquire_write=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> str
"""get_cosmos_resource_token.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param container_name:
:type container_name: str
:param acquire_write:
:type acquire_write: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_cosmos_resource_token_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
container_name=container_name,
acquire_write=acquire_write,
template_url=self.get_cosmos_resource_token.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_cosmos_resource_token.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/TraceSessions/container/{containerName}/resourceToken'} # type: ignore
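# Usage sketch for the resource token operation (comment only, with the same caveat as
# above: the `trace_sessions` attribute name is an assumption). The call returns the
# token as a plain string unless a custom `cls` callback is supplied.
#
#     token = service_client.trace_sessions.get_cosmos_resource_token(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace-name>",
#         container_name="<cosmos-container-name>",
#         acquire_write=True,
#     )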
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_trace_sessions_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_trace_sessions_operations.py",
"repo_id": "promptflow",
"token_count": 4169
} | 20 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=protected-access
import copy
import json
import os
import re
from datetime import datetime
from functools import cached_property
from pathlib import Path
from typing import Dict, List, Optional, Union
import requests
from azure.ai.ml._artifacts._artifact_utilities import _check_and_upload_path
from azure.ai.ml._scope_dependent_operations import (
OperationConfig,
OperationsContainer,
OperationScope,
_ScopeDependentOperations,
)
from azure.ai.ml.constants._common import SHORT_URI_FORMAT
from azure.ai.ml.entities import Workspace
from azure.ai.ml.operations._operation_orchestrator import OperationOrchestrator
from azure.core.exceptions import HttpResponseError
from promptflow._sdk._constants import (
CLIENT_FLOW_TYPE_2_SERVICE_FLOW_TYPE,
DAG_FILE_NAME,
MAX_LIST_CLI_RESULTS,
WORKSPACE_LINKED_DATASTORE_NAME,
FlowType,
ListViewType,
)
from promptflow._sdk._errors import FlowOperationError
from promptflow._sdk._telemetry import ActivityType, WorkspaceTelemetryMixin, monitor_operation
from promptflow._sdk._utils import PromptflowIgnoreFile
from promptflow._sdk._vendor._asset_utils import traverse_directory
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.azure._constants._flow import DEFAULT_STORAGE
from promptflow.azure._entities._flow import Flow
from promptflow.azure._load_functions import load_flow
from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller
from promptflow.azure.operations._artifact_utilities import _get_datastore_name, get_datastore_info
from promptflow.azure.operations._fileshare_storeage_helper import FlowFileStorageClient
from promptflow.exceptions import SystemErrorException, UserErrorException
logger = get_cli_sdk_logger()
class FlowOperations(WorkspaceTelemetryMixin, _ScopeDependentOperations):
"""FlowOperations that can manage flows.
You should not instantiate this class directly. Instead, you should
create a :class:`~promptflow.azure.PFClient` instance; this operation group is then available as an attribute of that client.
"""
_FLOW_RESOURCE_PATTERN = re.compile(r"azureml:.*?/workspaces/(?P<experiment_id>.*?)/flows/(?P<flow_id>.*?)$")
def __init__(
self,
operation_scope: OperationScope,
operation_config: OperationConfig,
all_operations: OperationsContainer,
credential,
service_caller: FlowServiceCaller,
workspace: Workspace,
**kwargs: Dict,
):
super().__init__(
operation_scope=operation_scope,
operation_config=operation_config,
workspace_name=operation_scope.workspace_name,
subscription_id=operation_scope.subscription_id,
resource_group_name=operation_scope.resource_group_name,
)
self._all_operations = all_operations
self._service_caller = service_caller
self._credential = credential
self._workspace = workspace
@cached_property
def _workspace_id(self):
return self._workspace._workspace_id
@cached_property
def _index_service_endpoint_url(self):
"""Get the endpoint url for the workspace."""
endpoint = self._service_caller._service_endpoint
return endpoint + "index/v1.0" + self._service_caller._common_azure_url_pattern
@monitor_operation(activity_name="pfazure.flows.create_or_update", activity_type=ActivityType.PUBLICAPI)
def create_or_update(self, flow: Union[str, Path], display_name=None, type=None, **kwargs) -> Flow:
"""Create a flow to remote from local source, or update the metadata of an existing flow.
.. note::
Functionality of updating flow metadata is yet to be supported.
:param flow: The source of the flow to create.
:type flow: Union[str, Path]
:param display_name: The display name of the flow to create. Default to be flow folder name + timestamp
if not specified. e.g. "web-classification-10-27-2023-14-19-10"
:type display_name: str
:param type: The type of the flow to create. One of ["standard", evaluation", "chat"].
Default to be "standard" if not specified.
:type type: str
:param description: The description of the flow to create. Default to be the description in flow yaml file.
:type description: str
:param tags: The tags of the flow to create. Default to be the tags in flow yaml file.
:type tags: Dict[str, str]
"""
# validate the parameters
azure_flow, flow_display_name, flow_type, kwargs = FlowOperations._validate_flow_creation_parameters(
flow, display_name, type, **kwargs
)
# upload to file share
file_share_flow_path = self._resolve_flow_code_and_upload_to_file_share(flow=azure_flow)
if not file_share_flow_path:
raise FlowOperationError(f"File share path should not be empty, got {file_share_flow_path!r}.")
# create flow to remote
flow_definition_file_path = f"{file_share_flow_path}/{DAG_FILE_NAME}"
rest_flow = self._create_remote_flow_via_file_share_path(
flow_display_name=flow_display_name,
flow_type=flow_type,
flow_definition_file_path=flow_definition_file_path,
**kwargs,
)
result_flow = Flow._from_pf_service(rest_flow)
flow_dict = result_flow._to_dict()
print(f"Flow created successfully:\n{json.dumps(flow_dict, indent=4)}")
return result_flow
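    # Hedged usage sketch for create_or_update above (illustrative only; the local folder path and metadata
    # values are assumptions). "pf" refers to a PFClient instance as sketched near the top of this class:
    #   flow = pf.flows.create_or_update(
    #       flow="./web-classification",
    #       display_name="web-classification-sample",
    #       type="standard",
    #       tags={"owner": "sdk-sample"},
    #   )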
@staticmethod
def _validate_flow_creation_parameters(source, flow_display_name=None, flow_type=None, **kwargs):
"""Validate the parameters for flow creation operation."""
# validate the source folder
logger.info("Validating flow source.")
if not Path(source, DAG_FILE_NAME).exists():
raise UserErrorException(
f"Flow source must be a directory with flow definition yaml '{DAG_FILE_NAME}'. "
f"Got {Path(source).resolve().as_posix()!r}."
)
# validate flow source with flow schema
logger.info("Validating flow schema.")
flow_dict = FlowOperations._validate_flow_schema(source, flow_display_name, flow_type, **kwargs)
logger.info("Validating flow creation parameters.")
flow = load_flow(source)
        # if no display name is specified, use "flow folder name + timestamp"
flow_display_name = flow_dict.get("display_name", None)
if not flow_display_name:
flow_display_name = f"{Path(source).name}-{datetime.now().strftime('%m-%d-%Y-%H-%M-%S')}"
# if no flow type specified, use default flow type "standard"
flow_type = flow_dict.get("type", None)
if not flow_type:
flow_type = FlowType.STANDARD
# update description and tags to be the final value
description = flow_dict.get("description", None)
if isinstance(description, str):
kwargs["description"] = description
tags = flow_dict.get("tags", None)
if tags:
kwargs["tags"] = tags
return flow, flow_display_name, flow_type, kwargs
@staticmethod
def _validate_flow_schema(source, display_name=None, type=None, **kwargs):
"""Validate the flow schema."""
from promptflow._sdk.entities._flow import ProtectedFlow
params_override = copy.deepcopy(kwargs)
if display_name is not None:
params_override["display_name"] = display_name
if type is not None:
params_override["type"] = type
flow_entity = ProtectedFlow.load(source=source, params_override=params_override)
flow_entity._validate(raise_error=True) # raise error if validation failed
flow_dict = flow_entity._dump_for_validation()
return flow_dict
def _resolve_flow_code_and_upload_to_file_share(self, flow: Flow, ignore_tools_json=False) -> str:
remote_file_share_folder_name = f"{Path(flow.code).name}-{datetime.now().strftime('%m-%d-%Y-%H-%M-%S')}"
ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config)
file_share_flow_path = ""
logger.info("Building flow code.")
with flow._build_code() as code:
if code is None:
raise FlowOperationError("Failed to build flow code.")
# ignore flow.tools.json if needed (e.g. for flow run scenario)
if ignore_tools_json:
ignore_file = code._ignore_file
if isinstance(ignore_file, PromptflowIgnoreFile):
ignore_file._ignore_tools_json = ignore_tools_json
else:
raise FlowOperationError(
message=f"Flow code should have PromptflowIgnoreFile, got {type(ignore_file)}"
)
code.datastore = DEFAULT_STORAGE
datastore_name = _get_datastore_name(datastore_name=DEFAULT_STORAGE)
datastore_operation = ops._code_assets._datastore_operation
datastore_info = get_datastore_info(datastore_operation, datastore_name)
logger.debug("Creating storage client for uploading flow to file share.")
storage_client = FlowFileStorageClient(
credential=datastore_info["credential"],
file_share_name=datastore_info["container_name"],
account_url=datastore_info["account_url"],
azure_cred=datastore_operation._credential,
)
# set storage client to flow operation, can be used in test case
self._storage_client = storage_client
# check if the file share directory exists
logger.debug("Checking if the file share directory exists.")
if storage_client._check_file_share_directory_exist(remote_file_share_folder_name):
raise FlowOperationError(
f"Remote flow folder {remote_file_share_folder_name!r} already exists under "
f"'{storage_client.file_share_prefix}'. Please change the flow folder name and try again."
)
try:
logger.info("Uploading flow directory to file share.")
storage_client.upload_dir(
source=code.path,
dest=remote_file_share_folder_name,
                    msg="Uploading flow directory to file share",
ignore_file=code._ignore_file,
show_progress=False,
)
except Exception as e:
raise FlowOperationError(f"Failed to upload flow to file share due to: {str(e)}.") from e
file_share_flow_path = f"{storage_client.file_share_prefix}/{remote_file_share_folder_name}"
logger.info(f"Successfully uploaded flow to file share path {file_share_flow_path!r}.")
return file_share_flow_path
def _create_remote_flow_via_file_share_path(
self, flow_display_name, flow_type, flow_definition_file_path, **kwargs
):
"""Create a flow to remote from file share path."""
service_flow_type = CLIENT_FLOW_TYPE_2_SERVICE_FLOW_TYPE[flow_type]
description = kwargs.get("description", None)
tags = kwargs.get("tags", None)
body = {
"flow_name": flow_display_name,
"flow_definition_file_path": flow_definition_file_path,
"flow_type": service_flow_type,
"description": description,
"tags": tags,
}
rest_flow_result = self._service_caller.create_flow(
subscription_id=self._operation_scope.subscription_id,
resource_group_name=self._operation_scope.resource_group_name,
workspace_name=self._operation_scope.workspace_name,
body=body,
)
return rest_flow_result
    def get(self, name: str) -> Flow:
        """Get a flow from Azure.
:param name: The name of the flow to get.
:type name: str
:return: The flow.
:rtype: ~promptflow.azure.entities.Flow
"""
try:
rest_flow = self._service_caller.get_flow(
subscription_id=self._operation_scope.subscription_id,
resource_group_name=self._operation_scope.resource_group_name,
workspace_name=self._operation_scope.workspace_name,
flow_id=name,
experiment_id=self._workspace_id, # for flow operations, current experiment id is workspace id
)
except HttpResponseError as e:
if e.status_code == 404:
raise FlowOperationError(f"Flow {name!r} not found.") from e
else:
raise FlowOperationError(f"Failed to get flow {name!r} due to: {str(e)}.") from e
flow = Flow._from_pf_service(rest_flow)
return flow
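    # Hedged usage sketch for get above (illustrative only; the flow name is an assumption):
    #   flow = pf.flows.get(name="web-classification-10-27-2023-14-19-10")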
@monitor_operation(activity_name="pfazure.flows.list", activity_type=ActivityType.PUBLICAPI)
def list(
self,
max_results: int = MAX_LIST_CLI_RESULTS,
flow_type: Optional[FlowType] = None,
list_view_type: ListViewType = ListViewType.ACTIVE_ONLY,
include_others: bool = False,
**kwargs,
    ) -> List[Flow]:
        """List flows from Azure.
        :param max_results: The max number of flows to return, defaults to 50, max is 100.
        :type max_results: int
        :param flow_type: The flow type, defaults to None, which means all flow types. Supported flow types are
            ["standard", "evaluation", "chat"].
        :type flow_type: Optional[FlowType]
        :param list_view_type: The list view type, defaults to ListViewType.ACTIVE_ONLY.
        :type list_view_type: ListViewType
        :param include_others: Whether to list flows owned by other users in the remote workspace, defaults to False.
        :type include_others: bool
        :return: The list of flows.
        :rtype: List[~promptflow.azure.entities.Flow]
        """
if not isinstance(max_results, int) or max_results < 1:
raise FlowOperationError(f"'max_results' must be a positive integer, got {max_results!r}")
normalized_flow_type = str(flow_type).lower()
if flow_type is not None and normalized_flow_type not in FlowType.get_all_values():
raise FlowOperationError(f"'flow_type' must be one of {FlowType.get_all_values()}, got {flow_type!r}.")
headers = self._service_caller._get_headers()
if list_view_type == ListViewType.ACTIVE_ONLY:
filter_archived = ["false"]
elif list_view_type == ListViewType.ARCHIVED_ONLY:
filter_archived = ["true"]
elif list_view_type == ListViewType.ALL:
filter_archived = ["true", "false"]
else:
raise FlowOperationError(
f"Invalid list view type: {list_view_type!r}, expecting one of ['ActiveOnly', 'ArchivedOnly', 'All']"
)
user_object_id, user_tenant_id = self._service_caller._get_user_identity_info()
payload = {
"filters": [
{"field": "type", "operator": "eq", "values": ["flows"]},
{"field": "annotations/isArchived", "operator": "eq", "values": filter_archived},
{
"field": "properties/creationContext/createdBy/userTenantId",
"operator": "eq",
"values": [user_tenant_id],
},
],
"freeTextSearch": "",
"order": [{"direction": "Desc", "field": "properties/creationContext/createdTime"}],
# index service can return 100 results at most
"pageSize": min(max_results, 100),
"skip": 0,
"includeTotalResultCount": True,
"searchBuilder": "AppendPrefix",
}
# add flow filter to only list flows from current user
if not include_others:
payload["filters"].append(
{
"field": "properties/creationContext/createdBy/userObjectId",
"operator": "eq",
"values": [user_object_id],
}
)
endpoint = self._index_service_endpoint_url
url = endpoint + "/entities"
response = requests.post(url, headers=headers, json=payload)
if response.status_code == 200:
entities = json.loads(response.text)
flow_entities = entities["value"]
else:
raise FlowOperationError(
f"Failed to get flows from index service. Code: {response.status_code}, text: {response.text}"
)
# transform to flow instances
flow_instances = []
for entity in flow_entities:
flow = Flow._from_index_service(entity)
flow_instances.append(flow)
return flow_instances
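    # Hedged usage sketch for list above (illustrative only; assumes FlowType also defines a CHAT member, as
    # the supported string values suggest):
    #   chat_flows = pf.flows.list(max_results=20, flow_type=FlowType.CHAT, include_others=True)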
def _download(self, source, dest):
# TODO: support download flow
raise NotImplementedError("Not implemented yet")
def _resolve_arm_id_or_upload_dependencies(self, flow: Flow, ignore_tools_json=False) -> None:
ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config)
# resolve flow's code
self._try_resolve_code_for_flow(flow=flow, ops=ops, ignore_tools_json=ignore_tools_json)
@classmethod
def _try_resolve_code_for_flow(cls, flow: Flow, ops: OperationOrchestrator, ignore_tools_json=False) -> None:
if flow.path:
# remote path
if flow.path.startswith("azureml://datastores"):
flow._code_uploaded = True
return
else:
raise ValueError("Path is required for flow.")
with flow._build_code() as code:
if code is None:
return
if flow._code_uploaded:
return
# TODO(2917889): generate flow meta for eager flow
if ignore_tools_json:
ignore_file = code._ignore_file
if isinstance(ignore_file, PromptflowIgnoreFile):
ignore_file._ignore_tools_json = ignore_tools_json
else:
raise SystemErrorException(
message=f"Flow code should have PromptflowIgnoreFile, got {type(ignore_file)}"
)
            # Print a per-file upload summary for the flow directory.
            # The upload logic lives in azure-ai-ml, so we cannot hook into it during the upload;
            # instead, we copy the traversal logic here to log which files will be uploaded.
ignore_file = code._ignore_file
upload_paths = []
source_path = Path(code.path).resolve()
prefix = os.path.basename(source_path) + "/"
for root, _, files in os.walk(source_path, followlinks=True):
upload_paths += list(
traverse_directory(
root,
files,
prefix=prefix,
ignore_file=ignore_file,
)
)
ignore_files = code._ignore_file._get_ignore_list()
for file_path in ignore_files:
logger.debug(f"will ignore file: {file_path}...")
for file_path, _ in upload_paths:
logger.debug(f"will upload file: {file_path}...")
code.datastore = WORKSPACE_LINKED_DATASTORE_NAME
            # NOTE: For flow directory upload, we prefer to upload it to the workspace linked datastore,
            # therefore we will directly use _check_and_upload_path instead of the v2 SDK public API
            # CodeOperations.create_or_update, as the latter uploads the code asset to another
            # container in the storage account, which may fail with vnet for MT.
            # However, we might run into a list-secret permission error (especially in a Heron workspace);
            # in that case, we will leverage the v2 SDK public API, which has a solution for Heron,
            # and request MT with the blob url.
            # Refer to the except block below for more details.
try:
uploaded_code_asset, _ = _check_and_upload_path(
artifact=code,
asset_operations=ops._code_assets,
artifact_type="Code",
                    datastore_name=WORKSPACE_LINKED_DATASTORE_NAME,  # this argument does not take effect here
show_progress=True,
)
path = uploaded_code_asset.path
path = path[path.find("LocalUpload") :] # path on container
flow.code = path
# azureml://datastores/workspaceblobstore/paths/<path-to-flow-dag-yaml>
flow.path = SHORT_URI_FORMAT.format(
WORKSPACE_LINKED_DATASTORE_NAME, (Path(path) / flow.path).as_posix()
)
except HttpResponseError as e:
# catch authorization error for list secret on datastore
if "AuthorizationFailed" in str(e) and "datastores/listSecrets/action" in str(e):
uploaded_code_asset = ops._code_assets.create_or_update(code)
path = uploaded_code_asset.path
path = path.replace(".blob.core.windows.net:443/", ".blob.core.windows.net/") # remove :443 port
flow.code = path
# https://<storage-account-name>.blob.core.windows.net/<container-name>/<path-to-flow-dag-yaml>
flow.path = f"{path}/{flow.path}"
else:
raise
flow._code_uploaded = True
# region deprecated but keep for runtime test dependencies
def _resolve_arm_id_or_upload_dependencies_to_file_share(self, flow: Flow) -> None:
ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config)
# resolve flow's code
self._try_resolve_code_for_flow_to_file_share(flow=flow, ops=ops)
@classmethod
def _try_resolve_code_for_flow_to_file_share(cls, flow: Flow, ops: OperationOrchestrator) -> None:
from azure.ai.ml._utils._storage_utils import AzureMLDatastorePathUri
from ._artifact_utilities import _check_and_upload_path
if flow.path:
if flow.path.startswith("azureml://datastores"):
# remote path
path_uri = AzureMLDatastorePathUri(flow.path)
if path_uri.datastore != DEFAULT_STORAGE:
raise ValueError(f"Only {DEFAULT_STORAGE} is supported as remote storage for now.")
flow.path = path_uri.path
flow._code_uploaded = True
return
else:
raise ValueError("Path is required for flow.")
with flow._build_code() as code:
if code is None:
return
if flow._code_uploaded:
return
code.datastore = DEFAULT_STORAGE
uploaded_code_asset = _check_and_upload_path(
artifact=code,
asset_operations=ops._code_assets,
artifact_type="Code",
show_progress=False,
)
            if "remote_path" in uploaded_code_asset:
                path = uploaded_code_asset["remote_path"]
            elif "remote path" in uploaded_code_asset:
                path = uploaded_code_asset["remote path"]
            else:
                raise FlowOperationError("Failed to resolve the uploaded flow code path from the upload result.")
flow.code = path
flow.path = (Path(path) / flow.path).as_posix()
flow._code_uploaded = True
# endregion
| promptflow/src/promptflow/promptflow/azure/operations/_flow_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/operations/_flow_operations.py",
"repo_id": "promptflow",
"token_count": 10428
} | 21 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from promptflow._sdk._constants import VIS_JS_BUNDLE_FILENAME
@dataclass
class RunDetail:
flow_runs: List[dict]
node_runs: List[dict]
@dataclass
class RunMetadata:
name: str
display_name: str
create_time: str
flow_path: str
output_path: str
tags: Optional[List[Dict[str, str]]]
lineage: Optional[str]
metrics: Optional[Dict[str, Any]]
dag: Optional[str]
flow_tools_json: Optional[dict]
mode: Optional[str] = ""
@dataclass
class VisualizationConfig:
    # use camelCase names here to fit the contract requirement from the JS side
availableIDEList: List[str]
@dataclass
class RunVisualization:
detail: List[RunDetail]
metadata: List[RunMetadata]
config: List[VisualizationConfig]
@dataclass
class VisualizationRender:
data: dict
js_path: str = VIS_JS_BUNDLE_FILENAME
def __post_init__(self):
self.data = json.dumps(json.dumps(self.data)) # double json.dumps to match JS requirements
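        # Hedged illustration (the sample value below is an assumption): with data = {"a": 1}, the first
        # json.dumps yields '{"a": 1}' and the second wraps it into the JSON string '"{\"a\": 1}"',
        # i.e. an escaped string literal that the JS side can parse back into an object.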
| promptflow/src/promptflow/promptflow/contracts/_run_management.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/contracts/_run_management.py",
"repo_id": "promptflow",
"token_count": 421
} | 22 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import asyncio
import contextvars
import inspect
import threading
from concurrent import futures
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Dict, List, Optional, Tuple
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.logger_utils import flow_logger
from promptflow._utils.utils import set_context
from promptflow.contracts.flow import Node
from promptflow.executor._dag_manager import DAGManager
from promptflow.executor._errors import LineExecutionTimeoutError, NoNodeExecutedError
RUN_FLOW_NODES_LINEARLY = 1
DEFAULT_CONCURRENCY_BULK = 2
DEFAULT_CONCURRENCY_FLOW = 16
class FlowNodesScheduler:
def __init__(
self,
tools_manager: ToolsManager,
inputs: Dict,
nodes_from_invoker: List[Node],
node_concurrency: int,
context: FlowExecutionContext,
) -> None:
self._tools_manager = tools_manager
self._future_to_node: Dict[Future, Node] = {}
self._node_concurrency = min(node_concurrency, DEFAULT_CONCURRENCY_FLOW)
        flow_logger.info(f"Starting to run {len(nodes_from_invoker)} nodes with concurrency level {self._node_concurrency}.")
self._dag_manager = DAGManager(nodes_from_invoker, inputs)
self._context = context
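    # Hedged usage sketch (illustrative only; the exact wiring lives in the flow executor and the timeout
    # value is an assumption):
    #   scheduler = FlowNodesScheduler(tools_manager, inputs, nodes, DEFAULT_CONCURRENCY_FLOW, context)
    #   outputs, bypassed_nodes = scheduler.execute(line_timeout_sec=600)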
def wait_within_timeout(self, execution_event: threading.Event, timeout: int):
flow_logger.info(f"Timeout task is scheduled to wait for {timeout} seconds.")
signal = execution_event.wait(timeout=timeout)
if signal:
flow_logger.info("Timeout task is cancelled because the execution is finished.")
else:
            flow_logger.warning(f"Timeout task timed out after waiting for {timeout} seconds.")
def execute(
self,
line_timeout_sec: Optional[int] = None,
) -> Tuple[dict, dict]:
parent_context = contextvars.copy_context()
with ThreadPoolExecutor(
max_workers=self._node_concurrency, initializer=set_context, initargs=(parent_context,)
) as executor:
self._execute_nodes(executor)
timeout_task = None
event = threading.Event()
if line_timeout_sec is not None:
timeout_task = executor.submit(self.wait_within_timeout, event, line_timeout_sec)
try:
while not self._dag_manager.completed():
if not self._future_to_node:
raise NoNodeExecutedError("No nodes are ready for execution, but the flow is not completed.")
tasks_to_wait = list(self._future_to_node.keys())
if timeout_task is not None:
tasks_to_wait.append(timeout_task)
completed_futures_with_wait, _ = futures.wait(tasks_to_wait, return_when=futures.FIRST_COMPLETED)
completed_futures = [f for f in completed_futures_with_wait if f in self._future_to_node]
self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))
for each_future in completed_futures:
del self._future_to_node[each_future]
if timeout_task and timeout_task.done():
raise LineExecutionTimeoutError(self._context._line_number, line_timeout_sec)
self._execute_nodes(executor)
except Exception as e:
err_msg = "Flow execution has failed."
if isinstance(e, LineExecutionTimeoutError):
err_msg = f"Line execution timeout after {line_timeout_sec} seconds."
self._context.cancel_node_runs(err_msg)
node_names = ",".join(node.name for node in self._future_to_node.values())
flow_logger.error(f"{err_msg} Cancelling all running nodes: {node_names}.")
for unfinished_future in self._future_to_node.keys():
                # We can't cancel running tasks here; only pending tasks can be cancelled.
                unfinished_future.cancel()
            # Even though we raise the exception here, we still need to wait for all running jobs to finish before exiting.
raise e
finally:
# Cancel timeout task no matter the execution is finished or failed.
event.set()
for node in self._dag_manager.bypassed_nodes:
self._dag_manager.completed_nodes_outputs[node] = None
return self._dag_manager.completed_nodes_outputs, self._dag_manager.bypassed_nodes
def _execute_nodes(self, executor: ThreadPoolExecutor):
# Skip nodes and update node run info until there are no nodes to bypass
nodes_to_bypass = self._dag_manager.pop_bypassable_nodes()
while nodes_to_bypass:
for node in nodes_to_bypass:
self._context.bypass_node(node)
nodes_to_bypass = self._dag_manager.pop_bypassable_nodes()
# Submit nodes that are ready to run
nodes_to_exec = self._dag_manager.pop_ready_nodes()
if nodes_to_exec:
self._submit_nodes(executor, nodes_to_exec)
def _collect_outputs(self, completed_futures: List[Future]):
completed_nodes_outputs = {}
for each_future in completed_futures:
each_node_result = each_future.result()
each_node = self._future_to_node[each_future]
completed_nodes_outputs[each_node.name] = each_node_result
return completed_nodes_outputs
def _submit_nodes(self, executor: ThreadPoolExecutor, nodes):
for each_node in nodes:
future = executor.submit(self._exec_single_node_in_thread, (each_node, self._dag_manager))
self._future_to_node[future] = each_node
def _exec_single_node_in_thread(self, args: Tuple[Node, DAGManager]):
node, dag_manager = args
        # We are using the same run tracker and cache manager for all threads, which may not be thread safe.
        # But for the bulk run scenario, we've been doing this for a long time, and it works well.
context = self._context
f = self._tools_manager.get_tool(node.name)
kwargs = dag_manager.get_node_valid_inputs(node, f)
if inspect.iscoroutinefunction(f):
# TODO: Run async functions in flow level event loop
result = asyncio.run(context.invoke_tool_async(node, f, kwargs=kwargs))
else:
result = context.invoke_tool(node, f, kwargs=kwargs)
return result
| promptflow/src/promptflow/promptflow/executor/_flow_nodes_scheduler.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_flow_nodes_scheduler.py",
"repo_id": "promptflow",
"token_count": 2820
} | 23 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow._core.tool import ToolInvoker
class DefaultToolInvoker(ToolInvoker):
def invoke_tool(self, f, *args, **kwargs):
        return f(*args, **kwargs)  # No extra behavior; just invoke the tool directly
| promptflow/src/promptflow/promptflow/executor/_tool_invoker.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_tool_invoker.py",
"repo_id": "promptflow",
"token_count": 89
} | 24 |