| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| stringlengths 13–1.77M | stringlengths 22–127 | dict | int64 0–28 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from .activity import ( # noqa: F401
ActivityCompletionStatus,
ActivityType,
log_activity,
monitor_operation,
request_id_context,
)
from .logging_handler import PromptFlowSDKLogHandler, get_appinsights_log_handler # noqa: F401
from .telemetry import TelemetryMixin, WorkspaceTelemetryMixin, get_telemetry_logger, is_telemetry_enabled # noqa: F401
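# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the activity-logging API re-exported above; the activity
# name below is an assumption for the example.
#
#   from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
#
#   telemetry_logger = get_telemetry_logger()
#   with log_activity(telemetry_logger, "pf.flows.test", activity_type=ActivityType.PUBLICAPI):
#       ...  # the operation being measured is wrapped by the context manager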
| promptflow/src/promptflow/promptflow/_sdk/_telemetry/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_telemetry/__init__.py",
"repo_id": "promptflow",
"token_count": 150
} | 14 |
# syntax=docker/dockerfile:1
FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build
WORKDIR /
COPY ./flow /flow
COPY ./connections /connections
COPY ./start.sh /start.sh
FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS runtime
COPY --from=build / /
ENV IS_IN_DOCKER="true"
EXPOSE 8080
RUN apt-get update && apt-get install -y runit
# reset runsvdir
RUN rm -rf /var/runit
COPY ./runit /var/runit
# grant permission
RUN chmod -R +x /var/runit
CMD ["bash", "./start.sh"]
| promptflow/src/promptflow/promptflow/_sdk/data/docker_csharp/Dockerfile.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/docker_csharp/Dockerfile.jinja2",
"repo_id": "promptflow",
"token_count": 194
} | 15 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from pathlib import Path
from typing import Union
from promptflow._constants import LANGUAGE_KEY, FlowLanguage
from promptflow._sdk._constants import BASE_PATH_CONTEXT_KEY
from promptflow._sdk.entities._flow import FlowBase
from promptflow.exceptions import UserErrorException
class EagerFlow(FlowBase):
"""This class is used to represent an eager flow."""
def __init__(
self,
path: Union[str, PathLike],
code: Union[str, PathLike],
entry: str,
data: dict,
**kwargs,
):
# flow.dag.yaml file path or entry.py file path
path = Path(path)
# flow.dag.yaml file's folder or entry.py's folder
code = Path(code)
# entry function name
self.entry = entry
# TODO(2910062): support eager flow execution cache
super().__init__(data=data, path=path, code=code, content_hash=None, **kwargs)
@property
def language(self) -> str:
return self._data.get(LANGUAGE_KEY, FlowLanguage.Python)
@property
def additional_includes(self) -> list:
return self._data.get("additional_includes", [])
@classmethod
def _create_schema_for_validation(cls, context):
# import here to avoid circular import
from ..schemas._flow import EagerFlowSchema
return EagerFlowSchema(context=context)
@classmethod
def _load(cls, path: Path, data: dict, **kwargs):
# schema validation on unknown fields
data = cls._create_schema_for_validation(context={BASE_PATH_CONTEXT_KEY: path.parent}).load(data)
entry = data["entry"]
code = path.parent
if entry is None:
raise UserErrorException(f"Entry function is not specified for flow {path}")
return cls(path=path, code=code, entry=entry, data=data, **kwargs)
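# --- Illustrative usage (a sketch, not part of the original module) ---
# Assumes a local eager-flow folder whose YAML definition contains an "entry"
# field; the file name below is an assumption for the example.
#
#   from pathlib import Path
#   from promptflow._utils.yaml_utils import load_yaml
#
#   flow_file = Path("my_flow/flow.dag.yaml")
#   flow = EagerFlow._load(path=flow_file, data=load_yaml(flow_file))
#   print(flow.entry, flow.language)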
| promptflow/src/promptflow/promptflow/_sdk/entities/_eager_flow.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_eager_flow.py",
"repo_id": "promptflow",
"token_count": 742
} | 16 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import datetime
import json
import logging
import shutil
from dataclasses import asdict, dataclass
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, NewType, Optional, Tuple, Union
from filelock import FileLock
from promptflow._sdk._constants import (
HOME_PROMPT_FLOW_DIR,
LINE_NUMBER,
LOCAL_STORAGE_BATCH_SIZE,
PROMPT_FLOW_DIR_NAME,
LocalStorageFilenames,
RunInfoSources,
)
from promptflow._sdk._errors import BulkRunException, InvalidRunError
from promptflow._sdk._load_functions import load_flow
from promptflow._sdk._utils import (
PromptflowIgnoreFile,
generate_flow_tools_json,
json_dump,
json_load,
json_loads_parse_const_as_str,
pd_read_json,
read_open,
write_open,
)
from promptflow._sdk.entities import Run
from promptflow._sdk.entities._eager_flow import EagerFlow
from promptflow._sdk.entities._flow import Flow
from promptflow._utils.dataclass_serializer import serialize
from promptflow._utils.exception_utils import PromptflowExceptionPresenter
from promptflow._utils.logger_utils import LogContext, get_cli_sdk_logger
from promptflow._utils.multimedia_utils import get_file_reference_encoder, resolve_multimedia_data_recursively
from promptflow._utils.yaml_utils import load_yaml
from promptflow.batch._result import BatchResult
from promptflow.contracts.multimedia import Image
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.contracts.run_mode import RunMode
from promptflow.exceptions import UserErrorException
from promptflow.storage import AbstractBatchRunStorage
logger = get_cli_sdk_logger()
RunInputs = NewType("RunInputs", Dict[str, List[Any]])
RunOutputs = NewType("RunOutputs", Dict[str, List[Any]])
RunMetrics = NewType("RunMetrics", Dict[str, Any])
@dataclass
class LoggerOperations(LogContext):
stream: bool = False
@property
def log_path(self) -> str:
return str(self.file_path)
def get_logs(self) -> str:
with read_open(self.file_path) as f:
return f.read()
@classmethod
def _get_execute_loggers_list(cls) -> List[logging.Logger]:
result = super()._get_execute_loggers_list()
result.append(logger)
return result
def get_initializer(self):
return partial(
LoggerOperations,
file_path=self.file_path,
run_mode=self.run_mode,
credential_list=self.credential_list,
stream=self.stream,
)
def __enter__(self):
log_path = Path(self.log_path)
log_path.parent.mkdir(parents=True, exist_ok=True)
if self.run_mode == RunMode.Batch:
log_path.touch(exist_ok=True)
else:
if log_path.exists():
# for non batch run, clean up previous log content
try:
with write_open(log_path) as file:
file.truncate(0)
except Exception as e:
logger.warning(f"Failed to clean up the previous log content because {e}")
else:
log_path.touch()
for _logger in self._get_execute_loggers_list():
for handler in _logger.handlers:
if self.stream is False and isinstance(handler, logging.StreamHandler):
handler.setLevel(logging.CRITICAL)
super().__enter__()
def __exit__(self, *args):
super().__exit__(*args)
for _logger in self._get_execute_loggers_list():
for handler in _logger.handlers:
if self.stream is False and isinstance(handler, logging.StreamHandler):
handler.setLevel(logging.CRITICAL)
@dataclass
class NodeRunRecord:
NodeName: str
line_number: int
run_info: str
start_time: datetime.datetime
end_time: datetime.datetime
status: str
@staticmethod
def from_run_info(node_run_info: NodeRunInfo) -> "NodeRunRecord":
return NodeRunRecord(
NodeName=node_run_info.node,
line_number=node_run_info.index,
run_info=serialize(node_run_info),
start_time=node_run_info.start_time.isoformat(),
end_time=node_run_info.end_time.isoformat(),
status=node_run_info.status.value,
)
def dump(self, path: Path, run_name: str) -> None:
# for nodes in first line run and all reduce nodes, the target filename is 000000000.jsonl
# so we need to handle concurrent write with file lock
filename_need_lock = "0".zfill(LocalStorageOperations.LINE_NUMBER_WIDTH) + ".jsonl"
if path.name == filename_need_lock:
file_lock_path = (HOME_PROMPT_FLOW_DIR / f"{run_name}.{self.NodeName}.lock").resolve()
lock = FileLock(file_lock_path)
lock.acquire()
try:
json_dump(asdict(self), path)
finally:
lock.release()
else:
# for normal nodes in other line runs, directly write
json_dump(asdict(self), path)
@dataclass
class LineRunRecord:
line_number: int
run_info: str
start_time: datetime.datetime
end_time: datetime.datetime
name: str
description: str
status: str
tags: str
@staticmethod
def from_flow_run_info(flow_run_info: FlowRunInfo) -> "LineRunRecord":
return LineRunRecord(
line_number=flow_run_info.index,
run_info=serialize(flow_run_info),
start_time=flow_run_info.start_time.isoformat(),
end_time=flow_run_info.end_time.isoformat(),
name=flow_run_info.name,
description=flow_run_info.description,
status=flow_run_info.status.value,
tags=flow_run_info.tags,
)
def dump(self, path: Path) -> None:
json_dump(asdict(self), path)
class LocalStorageOperations(AbstractBatchRunStorage):
"""LocalStorageOperations."""
LINE_NUMBER_WIDTH = 9
def __init__(self, run: Run, stream=False, run_mode=RunMode.Test):
self._run = run
self.path = self._prepare_folder(self._run._output_path)
self.logger = LoggerOperations(
file_path=self.path / LocalStorageFilenames.LOG, stream=stream, run_mode=run_mode
)
# snapshot
self._snapshot_folder_path = self._prepare_folder(self.path / LocalStorageFilenames.SNAPSHOT_FOLDER)
self._dag_path = self._snapshot_folder_path / LocalStorageFilenames.DAG
self._flow_tools_json_path = (
self._snapshot_folder_path / PROMPT_FLOW_DIR_NAME / LocalStorageFilenames.FLOW_TOOLS_JSON
)
self._inputs_path = self.path / LocalStorageFilenames.INPUTS # keep this for other usages
# below inputs and outputs are dumped by SDK
self._sdk_inputs_path = self._inputs_path
self._sdk_output_path = self.path / LocalStorageFilenames.OUTPUTS
# metrics
self._metrics_path = self.path / LocalStorageFilenames.METRICS
# legacy files: detail.json and outputs.jsonl (not the one in the flow_outputs folder)
self._detail_path = self.path / LocalStorageFilenames.DETAIL
self._legacy_outputs_path = self.path / LocalStorageFilenames.OUTPUTS
# for line run records, store per line
# for normal node run records, store per node per line;
# for reduce node run records, store centralized in 000000000.jsonl per node
self.outputs_folder = self._prepare_folder(self.path / "flow_outputs")
self._outputs_path = self.outputs_folder / "output.jsonl" # dumped by executor
self._node_infos_folder = self._prepare_folder(self.path / "node_artifacts")
self._run_infos_folder = self._prepare_folder(self.path / "flow_artifacts")
self._data_path = Path(run.data) if run.data is not None else None
self._meta_path = self.path / LocalStorageFilenames.META
self._exception_path = self.path / LocalStorageFilenames.EXCEPTION
self._dump_meta_file()
self._eager_mode = self._calculate_eager_mode(run)
self._loaded_flow_run_info = {} # {line_number: flow_run_info}
self._loaded_node_run_info = {} # {line_number: [node_run_info]}
@property
def eager_mode(self) -> bool:
return self._eager_mode
@classmethod
def _calculate_eager_mode(cls, run: Run) -> bool:
if run._run_source == RunInfoSources.LOCAL:
try:
flow_obj = load_flow(source=run.flow)
return isinstance(flow_obj, EagerFlow)
except Exception as e:
# For a run with an incomplete flow snapshot, ignore the flow load error so the run can still be displayed.
logger.debug(f"Failed to load flow from {run.flow} due to {e}.")
return False
elif run._run_source in [RunInfoSources.INDEX_SERVICE, RunInfoSources.RUN_HISTORY]:
return run._properties.get("azureml.promptflow.run_mode") == "Eager"
# TODO(2901279): support eager mode for run created from run folder
return False
def delete(self) -> None:
def on_rmtree_error(func, path, exc_info):
raise InvalidRunError(f"Failed to delete run {self.path} due to {exc_info[1]}.")
shutil.rmtree(path=self.path, onerror=on_rmtree_error)
def _dump_meta_file(self) -> None:
json_dump({"batch_size": LOCAL_STORAGE_BATCH_SIZE}, self._meta_path)
def dump_snapshot(self, flow: Flow) -> None:
"""Dump flow directory to snapshot folder, input file will be dumped after the run."""
patterns = [pattern for pattern in PromptflowIgnoreFile.IGNORE_FILE]
# ignore current output parent folder to avoid potential recursive copy
patterns.append(self._run._output_path.parent.name)
shutil.copytree(
flow.code.as_posix(),
self._snapshot_folder_path,
ignore=shutil.ignore_patterns(*patterns),
dirs_exist_ok=True,
)
# replace DAG file with the overwrite one
if not self._eager_mode:
self._dag_path.unlink()
shutil.copy(flow.path, self._dag_path)
def load_dag_as_string(self) -> str:
if self._eager_mode:
return ""
with read_open(self._dag_path) as f:
return f.read()
def load_flow_tools_json(self) -> dict:
if self._eager_mode:
# no tools json for eager mode
return {}
if not self._flow_tools_json_path.is_file():
return generate_flow_tools_json(self._snapshot_folder_path, dump=False)
else:
return json_load(self._flow_tools_json_path)
def load_io_spec(self) -> Tuple[Dict[str, Dict[str, str]], Dict[str, Dict[str, str]]]:
"""Load input/output spec from DAG."""
# TODO(2898455): support eager mode
with read_open(self._dag_path) as f:
flow_dag = load_yaml(f)
return flow_dag["inputs"], flow_dag["outputs"]
def load_inputs(self) -> RunInputs:
df = pd_read_json(self._inputs_path)
return df.to_dict("list")
def load_outputs(self) -> RunOutputs:
# for legacy run, simply read the output file and return as list of dict
if not self._outputs_path.is_file():
df = pd_read_json(self._legacy_outputs_path)
return df.to_dict("list")
df = pd_read_json(self._outputs_path)
if len(df) > 0:
df = df.set_index(LINE_NUMBER)
return df.to_dict("list")
def dump_inputs_and_outputs(self) -> None:
inputs, outputs = self._collect_io_from_debug_info()
with write_open(self._sdk_inputs_path) as f:
inputs.to_json(f, orient="records", lines=True, force_ascii=False)
with write_open(self._sdk_output_path) as f:
outputs.to_json(f, orient="records", lines=True, force_ascii=False)
def dump_metrics(self, metrics: Optional[RunMetrics]) -> None:
metrics = metrics or dict()
json_dump(metrics, self._metrics_path)
def dump_exception(self, exception: Exception, batch_result: BatchResult) -> None:
"""Dump exception to local storage.
:param exception: Exception raised during bulk run.
:param batch_result: Bulk run outputs. If no exception was raised, line run error messages are stored instead.
"""
# extract line run errors
errors = []
if batch_result:
for line_error in batch_result.error_summary.error_list:
errors.append(line_error.to_dict())
# collect aggregation node error
for node_name, aggr_error in batch_result.error_summary.aggr_error_dict.items():
errors.append({"error": aggr_error, "aggregation_node_name": node_name})
if errors:
try:
# use first line run error message as exception message if no exception raised
error = errors[0]
message = error["error"]["message"]
except Exception:
message = (
"Failed to extract error message from line runs. "
f"Please check {self._outputs_path} for more info."
)
elif exception and isinstance(exception, UserErrorException):
# SystemError will be raised above and users can see it, so we don't need to dump it.
message = str(exception)
else:
return
if not isinstance(exception, BulkRunException):
# If other errors raised, pass it into PromptflowException
exception = BulkRunException(
message=message,
error=exception,
failed_lines=batch_result.failed_lines if batch_result else "unknown",
total_lines=batch_result.total_lines if batch_result else "unknown",
errors={"errors": errors},
)
json_dump(PromptflowExceptionPresenter.create(exception).to_dict(include_debug_info=True), self._exception_path)
def load_exception(self) -> Dict:
try:
return json_load(self._exception_path)
except Exception:
return {}
def load_detail(self, parse_const_as_str: bool = False) -> Dict[str, list]:
if self._detail_path.is_file():
# legacy run with local file detail.json, then directly load from the file
return json_load(self._detail_path)
else:
flow_runs = self._load_all_flow_run_info(parse_const_as_str=parse_const_as_str)
node_runs = self._load_all_node_run_info(parse_const_as_str=parse_const_as_str)
return {"flow_runs": flow_runs, "node_runs": node_runs}
def load_metrics(self, *, parse_const_as_str: bool = False) -> Dict[str, Union[int, float, str]]:
return json_load(self._metrics_path, parse_const_as_str=parse_const_as_str)
def persist_node_run(self, run_info: NodeRunInfo) -> None:
"""Persist node run record to local storage."""
node_folder = self._prepare_folder(self._node_infos_folder / run_info.node)
self._persist_run_multimedia(run_info, node_folder)
node_run_record = NodeRunRecord.from_run_info(run_info)
# for reduce nodes, the line_number is None, store the info in the 000000000.jsonl
# align with AzureMLRunStorageV2, which is a storage contract with PFS
line_number = 0 if node_run_record.line_number is None else node_run_record.line_number
filename = f"{str(line_number).zfill(self.LINE_NUMBER_WIDTH)}.jsonl"
node_run_record.dump(node_folder / filename, run_name=self._run.name)
def _load_info_from_file(self, file_path, parse_const_as_str: bool = False):
json_loads = json.loads if not parse_const_as_str else json_loads_parse_const_as_str
run_infos = []
if file_path.suffix.lower() == ".jsonl":
with read_open(file_path) as f:
run_infos = [json_loads(line)["run_info"] for line in list(f)]
return run_infos
def _load_all_node_run_info(self, parse_const_as_str: bool = False) -> List[Dict]:
node_run_infos = []
for node_folder in sorted(self._node_infos_folder.iterdir()):
for node_run_record_file in sorted(node_folder.iterdir()):
new_runs = self._load_info_from_file(node_run_record_file, parse_const_as_str)
node_run_infos.extend(new_runs)
for new_run in new_runs:
new_run = resolve_multimedia_data_recursively(node_run_record_file, new_run)
run_info = NodeRunInfo.deserialize(new_run)
line_number = run_info.index
self._loaded_node_run_info[line_number] = self._loaded_node_run_info.get(line_number, [])
self._loaded_node_run_info[line_number].append(run_info)
return node_run_infos
def load_node_run_info_for_line(self, line_number: int = None) -> List[NodeRunInfo]:
if not self._loaded_node_run_info:
self._load_all_node_run_info()
return self._loaded_node_run_info.get(line_number)
def persist_flow_run(self, run_info: FlowRunInfo) -> None:
"""Persist line run record to local storage."""
if not Status.is_terminated(run_info.status):
logger.info("Line run is not terminated, skip persisting line run record.")
return
self._persist_run_multimedia(run_info, self._run_infos_folder)
line_run_record = LineRunRecord.from_flow_run_info(run_info)
# calculate filename according to the batch size
# note that if batch_size > 1, need to well handle concurrent write scenario
lower_bound = line_run_record.line_number // LOCAL_STORAGE_BATCH_SIZE * LOCAL_STORAGE_BATCH_SIZE
upper_bound = lower_bound + LOCAL_STORAGE_BATCH_SIZE - 1
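# Worked example (assuming LOCAL_STORAGE_BATCH_SIZE == 25): line 30 yields
# lower_bound == 30 // 25 * 25 == 25 and upper_bound == 49, so the record
# is written to 000000025_000000049.jsonl.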
filename = (
f"{str(lower_bound).zfill(self.LINE_NUMBER_WIDTH)}_"
f"{str(upper_bound).zfill(self.LINE_NUMBER_WIDTH)}.jsonl"
)
line_run_record.dump(self._run_infos_folder / filename)
def _load_all_flow_run_info(self, parse_const_as_str: bool = False) -> List[Dict]:
flow_run_infos = []
for line_run_record_file in sorted(self._run_infos_folder.iterdir()):
new_runs = self._load_info_from_file(line_run_record_file, parse_const_as_str)
flow_run_infos.extend(new_runs)
for new_run in new_runs:
new_run = resolve_multimedia_data_recursively(line_run_record_file, new_run)
run_info = FlowRunInfo.deserialize(new_run)
line_number = run_info.index
self._loaded_flow_run_info[line_number] = run_info
return flow_run_infos
def load_flow_run_info(self, line_number: int = None) -> FlowRunInfo:
if not self._loaded_flow_run_info:
self._load_all_flow_run_info()
return self._loaded_flow_run_info.get(line_number)
def persist_result(self, result: Optional[BatchResult]) -> None:
"""Persist metrics from return of executor."""
if result is None:
return
self.dump_inputs_and_outputs()
self.dump_metrics(result.metrics)
def _persist_run_multimedia(self, run_info: Union[FlowRunInfo, NodeRunInfo], folder_path: Path):
if run_info.inputs:
run_info.inputs = self._serialize_multimedia(run_info.inputs, folder_path)
if run_info.output:
run_info.output = self._serialize_multimedia(run_info.output, folder_path)
run_info.result = None
if run_info.api_calls:
run_info.api_calls = self._serialize_multimedia(run_info.api_calls, folder_path)
def _serialize_multimedia(self, value, folder_path: Path, relative_path: Path = None):
pfbytes_file_reference_encoder = get_file_reference_encoder(folder_path, relative_path, use_absolute_path=True)
serialization_funcs = {Image: partial(Image.serialize, **{"encoder": pfbytes_file_reference_encoder})}
return serialize(value, serialization_funcs=serialization_funcs)
@staticmethod
def _prepare_folder(path: Union[str, Path]) -> Path:
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
return path
@staticmethod
def _outputs_padding(df: "DataFrame", inputs_line_numbers: List[int]) -> "DataFrame":
import pandas as pd
if len(df) == len(inputs_line_numbers):
return df
missing_lines = []
lines_set = set(df[LINE_NUMBER].values)
for i in inputs_line_numbers:
if i not in lines_set:
missing_lines.append({LINE_NUMBER: i})
df_to_append = pd.DataFrame(missing_lines)
res = pd.concat([df, df_to_append], ignore_index=True)
res = res.sort_values(by=LINE_NUMBER, ascending=True)
return res
def load_inputs_and_outputs(self) -> Tuple["DataFrame", "DataFrame"]:
if not self._sdk_inputs_path.is_file() or not self._sdk_output_path.is_file():
inputs, outputs = self._collect_io_from_debug_info()
else:
inputs = pd_read_json(self._sdk_inputs_path)
outputs = pd_read_json(self._sdk_output_path)
# if all line runs are failed, no need to fill
if len(outputs) > 0:
outputs = self._outputs_padding(outputs, inputs[LINE_NUMBER].tolist())
outputs.fillna(value="(Failed)", inplace=True) # replace nan with explicit prompt
outputs = outputs.set_index(LINE_NUMBER)
return inputs, outputs
def _collect_io_from_debug_info(self) -> Tuple["DataFrame", "DataFrame"]:
import pandas as pd
inputs, outputs = [], []
for line_run_record_file in sorted(self._run_infos_folder.iterdir()):
if line_run_record_file.suffix.lower() != ".jsonl":
continue
with read_open(line_run_record_file) as f:
datas = [json.loads(line) for line in list(f)]
for data in datas:
line_number: int = data[LINE_NUMBER]
line_run_info: dict = data["run_info"]
current_inputs = line_run_info.get("inputs")
current_outputs = line_run_info.get("output")
inputs.append(copy.deepcopy(current_inputs))
if current_outputs is not None:
current_outputs[LINE_NUMBER] = line_number
outputs.append(copy.deepcopy(current_outputs))
return pd.DataFrame(inputs), pd.DataFrame(outputs)
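# --- Illustrative read path (a sketch, not part of the original module) ---
# Assumes an existing local run named "my_run" created via the SDK/CLI.
#
#   from promptflow import PFClient
#
#   run = PFClient().runs.get("my_run")
#   storage = LocalStorageOperations(run)
#   detail = storage.load_detail()              # {"flow_runs": [...], "node_runs": [...]}
#   inputs, outputs = storage.load_inputs_and_outputs()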
| promptflow/src/promptflow/promptflow/_sdk/operations/_local_storage_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/operations/_local_storage_operations.py",
"repo_id": "promptflow",
"token_count": 10022
} | 17 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import io
import re
from jinja2 import Template
from .yaml_utils import dump_yaml, load_yaml_string
def generate_custom_strong_type_connection_spec(cls, package, package_version):
connection_spec = {
"connectionCategory": "CustomKeys",
"flowValueType": "CustomConnection",
"connectionType": cls.__name__,
"ConnectionTypeDisplayName": cls.__name__,
"configSpecs": [],
"module": cls.__module__,
"package": package,
"package_version": package_version,
}
for k, typ in cls.__annotations__.items():
spec = {
"name": k,
"displayName": k.replace("_", " ").title(),
"configValueType": typ.__name__,
}
if hasattr(cls, k):
spec["isOptional"] = getattr(cls, k, None) is not None
else:
spec["isOptional"] = False
connection_spec["configSpecs"].append(spec)
return connection_spec
def generate_custom_strong_type_connection_template(cls, connection_spec, package, package_version):
connection_template_str = """
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomStrongTypeConnection.schema.json
name: "to_replace_with_connection_name"
type: custom
custom_type: {{ custom_type }}
module: {{ module }}
package: {{ package }}
package_version: {{ package_version }}
configs:{% for key, value in configs.items() %}
{{ key }}: "{{ value -}}"{% endfor %}
secrets: # must-have{% for key, value in secrets.items() %}
{{ key }}: "{{ value -}}"{% endfor %}
"""
connection_template = Template(connection_template_str)
# Extract configs and secrets
configs = {}
secrets = {}
for spec in connection_spec["configSpecs"]:
if spec["configValueType"] == "Secret":
secrets[spec["name"]] = "to_replace_with_" + spec["name"].replace("-", "_")
else:
configs[spec["name"]] = getattr(cls, spec["name"], None) or "to_replace_with_" + spec["name"].replace(
"-", "_"
)
# Prepare data for template
data = {
"custom_type": cls.__name__,
"module": cls.__module__,
"package": package,
"package_version": package_version,
"configs": configs,
"secrets": secrets,
}
connection_template_with_data = connection_template.render(data)
connection_template_with_comments = render_comments(
connection_template_with_data, cls, secrets.keys(), configs.keys()
)
return connection_template_with_comments
def render_comments(connection_template, cls, secrets, configs):
if cls.__doc__ is not None:
data = load_yaml_string(connection_template)
comments_map = extract_comments_mapping(list(secrets) + list(configs), cls.__doc__)
# Add comments for secret keys
for key in secrets:
if key in comments_map.keys():
data["secrets"].yaml_add_eol_comment(comments_map[key] + "\n", key)
# Add comments for config keys
for key in configs:
if key in comments_map.keys():
data["configs"].yaml_add_eol_comment(comments_map[key] + "\n", key)
# Dump data object back to string
buf = io.StringIO()
dump_yaml(data, buf)
connection_template_with_comments = buf.getvalue()
return connection_template_with_comments
return connection_template
def extract_comments_mapping(keys, doc):
comments_map = {}
for key in keys:
try:
param_pattern = rf":param {key}: (.*)"
key_description = " ".join(re.findall(param_pattern, doc))
type_pattern = rf":type {key}: (.*)"
key_type = " ".join(re.findall(type_pattern, doc)).rstrip(".")
if key_type and key_description:
comments_map[key] = " ".join([key_type + " type.", key_description])
elif key_type:
comments_map[key] = key_type + " type."
elif key_description:
comments_map[key] = key_description
except re.error:
print("An error occurred while extracting the comments mapping.")
return comments_map
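# --- Illustrative usage (a sketch, not part of the original module) ---
# The connection class below is hypothetical; it stands in for a
# CustomStrongTypeConnection subclass with annotated config/secret fields.
#
#   from promptflow.contracts.types import Secret  # assumed import path
#
#   class MyCustomConnection:
#       """My custom connection.
#
#       :param api_key: The api key.
#       :type api_key: Secret
#       :param api_base: The api base url.
#       :type api_base: str
#       """
#       api_key: Secret
#       api_base: str = "https://example.com"
#
#   spec = generate_custom_strong_type_connection_spec(MyCustomConnection, "my-package", "0.0.1")
#   template = generate_custom_strong_type_connection_template(MyCustomConnection, spec, "my-package", "0.0.1")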
| promptflow/src/promptflow/promptflow/_utils/connection_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/connection_utils.py",
"repo_id": "promptflow",
"token_count": 1814
} | 18 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import contextvars
import logging
import threading
from promptflow._utils.utils import set_context
class RepeatLogTimer(threading.Timer):
"""Repeat to log message every interval seconds until it is cancelled."""
def __init__(
self, interval_seconds: float, logger: logging.Logger, level: int, log_message_function, args: tuple = None
):
self._logger = logger
self._level = level
self._log_message_function = log_message_function
self._function_args = args if args else tuple()
self._context = contextvars.copy_context()
super().__init__(interval_seconds, function=None)
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.cancel()
def run(self):
"""Override Timer.run method."""
# Set context variables from parent context.
set_context(self._context)
while not self.finished.wait(self.interval):
if not self.finished.is_set():
msgs = self._log_message_function(*self._function_args)
for msg in msgs:
self._logger.log(self._level, msg)
self.finished.set()
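# --- Illustrative usage (a sketch, not part of the original module) ---
# Logs a heartbeat message every 60 seconds while the wrapped block runs;
# the interval and message are assumptions for the example.
#
#   import logging
#   logger = logging.getLogger(__name__)
#
#   def _progress_messages():
#       return ["operation is still running..."]
#
#   with RepeatLogTimer(interval_seconds=60, logger=logger, level=logging.INFO,
#                       log_message_function=_progress_messages):
#       ...  # long-running work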
| promptflow/src/promptflow/promptflow/_utils/thread_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/thread_utils.py",
"repo_id": "promptflow",
"token_count": 505
} | 19 |
# How to automatically generate the REST client code
The REST client code in this folder is not written manually; it is generated by autorest.
## Setup
+ install [nodejs](https://nodejs.org/en)
+ install autorest
  + run `npm install -g autorest`
## Download swagger.json
Download swagger.json from [here](https://int.api.azureml-test.ms/flow/swagger/v1.0/swagger.json) to
[promptflow/azure/_restclient](../promptflow/azure/_restclient)
## Update code
+ cd to [promptflow/azure/_restclient](../promptflow/azure/_restclient)
+ run `autorest --v3 --python --track2 --version=3.8.0 --use=@autorest/[email protected] --input-file=swagger.json --output-folder=. --namespace=flow --modelerfour.lenient-model-deduplication`
+ don't change `--use`: the latest version of `@autorest/python` generates code following a different pattern, which is not compatible with our code.
## Update the generation history
- 2023.11.13 - [Update SDK restclient](https://github.com/microsoft/promptflow/pull/1101).
- 2023.12.18 - [Remove data portal url from the result of pfazure run show](https://github.com/microsoft/promptflow/pull/1497)
- 2024.2.2 - [Support specify compute instance as session compute](https://github.com/microsoft/promptflow/pull/1925)
- 2024.2.5 - [Support retrieve Cosmos token](https://github.com/microsoft/promptflow/pull/1972)
## Troubleshooting
### Duplicate object schemas with "xxx" name detected.
This is usually caused by duplicate generated class names.
```json
"FlowFeature": {
"type": "object",
"properties": {
"name": {
"type": "string",
"nullable": true
},
"description": {
"type": "string",
"nullable": true
},
"state": {
"type": "object",
"properties": {
"Runtime": {
"$ref": "#/components/schemas/FlowFeatureState"
},
"Executor": {
"$ref": "#/components/schemas/FlowFeatureState"
},
"PFS": {
"$ref": "#/components/schemas/FlowFeatureState"
}
},
"additionalProperties": false,
"nullable": true
}
},
"additionalProperties": false
},
"FlowFeatureState": {
"enum": [
"Ready",
"E2ETest"
],
"type": "string"
},
```
`FlowFeature` has a nested object field `state`, for which autorest generates a new class named `FlowFeatureState`; this collides with the enum `FlowFeatureState`.
To fix this, the server side needs to change the class name in the schema. In this case, the server side renamed the object property `state` to `states`, as shown in the sketch below, and the problem was resolved.
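For reference, a minimal sketch of the corrected schema after the rename (assumed; the actual server-side change may differ in detail) — the renamed property makes autorest generate `FlowFeatureStates`, which no longer collides with the enum:
```json
"states": {
  "type": "object",
  "properties": {
    "Runtime": { "$ref": "#/components/schemas/FlowFeatureState" },
    "Executor": { "$ref": "#/components/schemas/FlowFeatureState" },
    "PFS": { "$ref": "#/components/schemas/FlowFeatureState" }
  },
  "additionalProperties": false,
  "nullable": true
}
```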
| promptflow/src/promptflow/promptflow/azure/_restclient/README.md/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/README.md",
"repo_id": "promptflow",
"token_count": 901
} | 20 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._flow_runtimes_operations import build_check_ci_availability_request, build_check_mir_availability_request, build_check_runtime_upgrade_request, build_create_runtime_request, build_delete_runtime_request, build_get_runtime_capability_request, build_get_runtime_latest_config_request, build_get_runtime_request, build_list_runtimes_request, build_update_runtime_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FlowRuntimesOperations:
"""FlowRuntimesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_runtime(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
runtime_name: str,
async_call: Optional[bool] = False,
msi_token: Optional[bool] = False,
skip_port_check: Optional[bool] = False,
body: Optional["_models.CreateFlowRuntimeRequest"] = None,
**kwargs: Any
) -> "_models.FlowRuntimeDto":
"""create_runtime.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param runtime_name:
:type runtime_name: str
:param async_call:
:type async_call: bool
:param msi_token:
:type msi_token: bool
:param skip_port_check:
:type skip_port_check: bool
:param body:
:type body: ~flow.models.CreateFlowRuntimeRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRuntimeDto, or the result of cls(response)
:rtype: ~flow.models.FlowRuntimeDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateFlowRuntimeRequest')
else:
_json = None
request = build_create_runtime_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
runtime_name=runtime_name,
content_type=content_type,
json=_json,
async_call=async_call,
msi_token=msi_token,
skip_port_check=skip_port_check,
template_url=self.create_runtime.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRuntimeDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'} # type: ignore
@distributed_trace_async
async def update_runtime(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
runtime_name: str,
async_call: Optional[bool] = False,
msi_token: Optional[bool] = False,
skip_port_check: Optional[bool] = False,
body: Optional["_models.UpdateFlowRuntimeRequest"] = None,
**kwargs: Any
) -> "_models.FlowRuntimeDto":
"""update_runtime.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param runtime_name:
:type runtime_name: str
:param async_call:
:type async_call: bool
:param msi_token:
:type msi_token: bool
:param skip_port_check:
:type skip_port_check: bool
:param body:
:type body: ~flow.models.UpdateFlowRuntimeRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRuntimeDto, or the result of cls(response)
:rtype: ~flow.models.FlowRuntimeDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'UpdateFlowRuntimeRequest')
else:
_json = None
request = build_update_runtime_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
runtime_name=runtime_name,
content_type=content_type,
json=_json,
async_call=async_call,
msi_token=msi_token,
skip_port_check=skip_port_check,
template_url=self.update_runtime.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRuntimeDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'} # type: ignore
@distributed_trace_async
async def get_runtime(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
runtime_name: str,
**kwargs: Any
) -> "_models.FlowRuntimeDto":
"""get_runtime.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param runtime_name:
:type runtime_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRuntimeDto, or the result of cls(response)
:rtype: ~flow.models.FlowRuntimeDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_runtime_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
runtime_name=runtime_name,
template_url=self.get_runtime.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRuntimeDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'} # type: ignore
@distributed_trace_async
async def delete_runtime(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
runtime_name: str,
async_call: Optional[bool] = False,
msi_token: Optional[bool] = False,
**kwargs: Any
) -> "_models.FlowRuntimeDto":
"""delete_runtime.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param runtime_name:
:type runtime_name: str
:param async_call:
:type async_call: bool
:param msi_token:
:type msi_token: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRuntimeDto, or the result of cls(response)
:rtype: ~flow.models.FlowRuntimeDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_runtime_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
runtime_name=runtime_name,
async_call=async_call,
msi_token=msi_token,
template_url=self.delete_runtime.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRuntimeDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'} # type: ignore
@distributed_trace_async
async def check_ci_availability(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
compute_instance_name: str,
custom_app_name: str,
**kwargs: Any
) -> "_models.AvailabilityResponse":
"""check_ci_availability.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param compute_instance_name:
:type compute_instance_name: str
:param custom_app_name:
:type custom_app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilityResponse, or the result of cls(response)
:rtype: ~flow.models.AvailabilityResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilityResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_check_ci_availability_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
compute_instance_name=compute_instance_name,
custom_app_name=custom_app_name,
template_url=self.check_ci_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AvailabilityResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_ci_availability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/checkCiAvailability'} # type: ignore
@distributed_trace_async
async def check_mir_availability(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.AvailabilityResponse":
"""check_mir_availability.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param endpoint_name:
:type endpoint_name: str
:param deployment_name:
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilityResponse, or the result of cls(response)
:rtype: ~flow.models.AvailabilityResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilityResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_check_mir_availability_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
template_url=self.check_mir_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AvailabilityResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_mir_availability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/checkMirAvailability'} # type: ignore
@distributed_trace_async
async def check_runtime_upgrade(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
runtime_name: str,
**kwargs: Any
) -> bool:
"""check_runtime_upgrade.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param runtime_name:
:type runtime_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[bool]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_check_runtime_upgrade_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
runtime_name=runtime_name,
template_url=self.check_runtime_upgrade.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('bool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_runtime_upgrade.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}/needUpgrade'} # type: ignore
@distributed_trace_async
async def get_runtime_capability(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
runtime_name: str,
**kwargs: Any
) -> "_models.FlowRuntimeCapability":
"""get_runtime_capability.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param runtime_name:
:type runtime_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRuntimeCapability, or the result of cls(response)
:rtype: ~flow.models.FlowRuntimeCapability
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeCapability"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_runtime_capability_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
runtime_name=runtime_name,
template_url=self.get_runtime_capability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRuntimeCapability', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_runtime_capability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}/capability'} # type: ignore
@distributed_trace_async
async def get_runtime_latest_config(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.RuntimeConfiguration":
"""get_runtime_latest_config.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RuntimeConfiguration, or the result of cls(response)
:rtype: ~flow.models.RuntimeConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RuntimeConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_runtime_latest_config_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.get_runtime_latest_config.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('RuntimeConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_runtime_latest_config.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/latestConfig'} # type: ignore
@distributed_trace_async
async def list_runtimes(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> List["_models.FlowRuntimeDto"]:
"""list_runtimes.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of FlowRuntimeDto, or the result of cls(response)
:rtype: list[~flow.models.FlowRuntimeDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FlowRuntimeDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_runtimes_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_runtimes.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[FlowRuntimeDto]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_runtimes.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes'} # type: ignore
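# --- Illustrative call sketch (not part of the generated code) ---
# Assumes the generated async client exposes this operation group as
# `client.flow_runtimes`; the identifiers below are placeholders.
#
#   runtime = await client.flow_runtimes.get_runtime(
#       subscription_id="<subscription-id>",
#       resource_group_name="<resource-group>",
#       workspace_name="<workspace>",
#       runtime_name="<runtime-name>",
#   )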
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flow_runtimes_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flow_runtimes_operations.py",
"repo_id": "promptflow",
"token_count": 11768
} | 21 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_submit_bulk_run_async_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
error_handling_mode = kwargs.pop('error_handling_mode', None) # type: Optional[Union[str, "_models.ErrorHandlingMode"]]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/submit')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"bulkRunId": _SERIALIZER.url("bulk_run_id", bulk_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if error_handling_mode is not None:
query_parameters['errorHandlingMode'] = _SERIALIZER.query("error_handling_mode", error_handling_mode, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_send_policy_validation_async_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/policy')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"bulkRunId": _SERIALIZER.url("bulk_run_id", bulk_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_check_policy_validation_async_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/policy')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"bulkRunId": _SERIALIZER.url("bulk_run_id", bulk_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_log_result_for_bulk_run_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/LogResult')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"bulkRunId": _SERIALIZER.url("bulk_run_id", bulk_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_storage_info_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/storageInfo')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_log_flow_run_event_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
runtime_version, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/flowRuns/{flowRunId}/runtime/{runtimeVersion}/logEvent')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
"runtimeVersion": _SERIALIZER.url("runtime_version", runtime_version, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_log_flow_run_event_v2_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
runtime_version = kwargs.pop('runtime_version', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/flowRuns/{flowRunId}/logEvent')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if runtime_version is not None:
query_parameters['runtimeVersion'] = _SERIALIZER.query("runtime_version", runtime_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_log_flow_run_terminated_event_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
last_checked_time = kwargs.pop('last_checked_time', None) # type: Optional[datetime.datetime]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/flowRuns/{flowRunId}/logTerminatedEvent')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if last_checked_time is not None:
query_parameters['lastCheckedTime'] = _SERIALIZER.query("last_checked_time", last_checked_time, 'iso-8601')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_service_logs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/serviceLogs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"bulkRunId": _SERIALIZER.url("bulk_run_id", bulk_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_batch_update_service_logs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/serviceLogs/batch')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"bulkRunId": _SERIALIZER.url("bulk_run_id", bulk_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
class FlowRunsAdminOperations(object):
"""FlowRunsAdminOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def submit_bulk_run_async(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
error_handling_mode=None, # type: Optional[Union[str, "_models.ErrorHandlingMode"]]
**kwargs # type: Any
):
# type: (...) -> "_models.SubmitBulkRunResponse"
"""submit_bulk_run_async.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param bulk_run_id:
:type bulk_run_id: str
:param error_handling_mode:
:type error_handling_mode: str or ~flow.models.ErrorHandlingMode
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SubmitBulkRunResponse, or the result of cls(response)
:rtype: ~flow.models.SubmitBulkRunResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubmitBulkRunResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_submit_bulk_run_async_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
bulk_run_id=bulk_run_id,
error_handling_mode=error_handling_mode,
template_url=self.submit_bulk_run_async.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SubmitBulkRunResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
submit_bulk_run_async.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/submit'} # type: ignore
@distributed_trace
def send_policy_validation_async(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyValidationResponse"
"""send_policy_validation_async.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param bulk_run_id:
:type bulk_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyValidationResponse, or the result of cls(response)
:rtype: ~flow.models.PolicyValidationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyValidationResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_send_policy_validation_async_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
bulk_run_id=bulk_run_id,
template_url=self.send_policy_validation_async.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('PolicyValidationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
send_policy_validation_async.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/policy'} # type: ignore
@distributed_trace
def check_policy_validation_async(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyValidationResponse"
"""check_policy_validation_async.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param bulk_run_id:
:type bulk_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyValidationResponse, or the result of cls(response)
:rtype: ~flow.models.PolicyValidationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyValidationResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_check_policy_validation_async_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
bulk_run_id=bulk_run_id,
template_url=self.check_policy_validation_async.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('PolicyValidationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_policy_validation_async.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/policy'} # type: ignore
@distributed_trace
def log_result_for_bulk_run(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.KeyValuePairStringObject"]
"""log_result_for_bulk_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param bulk_run_id:
:type bulk_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of KeyValuePairStringObject, or the result of cls(response)
:rtype: list[~flow.models.KeyValuePairStringObject]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.KeyValuePairStringObject"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_log_result_for_bulk_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
bulk_run_id=bulk_run_id,
template_url=self.log_result_for_bulk_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[KeyValuePairStringObject]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
log_result_for_bulk_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/LogResult'} # type: ignore
@distributed_trace
def get_storage_info(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.StorageInfo"
"""get_storage_info.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageInfo, or the result of cls(response)
:rtype: ~flow.models.StorageInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_storage_info_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.get_storage_info.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_storage_info.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/storageInfo'} # type: ignore
@distributed_trace
def log_flow_run_event(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
runtime_version, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""log_flow_run_event.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:param runtime_version:
:type runtime_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_log_flow_run_event_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
runtime_version=runtime_version,
template_url=self.log_flow_run_event.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
log_flow_run_event.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/flowRuns/{flowRunId}/runtime/{runtimeVersion}/logEvent'} # type: ignore
@distributed_trace
def log_flow_run_event_v2(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
runtime_version=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> str
"""log_flow_run_event_v2.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:param runtime_version:
:type runtime_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_log_flow_run_event_v2_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
runtime_version=runtime_version,
template_url=self.log_flow_run_event_v2.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
log_flow_run_event_v2.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/flowRuns/{flowRunId}/logEvent'} # type: ignore
@distributed_trace
def log_flow_run_terminated_event(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
last_checked_time=None, # type: Optional[datetime.datetime]
**kwargs # type: Any
):
# type: (...) -> "_models.LogRunTerminatedEventDto"
"""log_flow_run_terminated_event.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:param last_checked_time:
:type last_checked_time: ~datetime.datetime
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LogRunTerminatedEventDto, or the result of cls(response)
:rtype: ~flow.models.LogRunTerminatedEventDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LogRunTerminatedEventDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_log_flow_run_terminated_event_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
last_checked_time=last_checked_time,
template_url=self.log_flow_run_terminated_event.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('LogRunTerminatedEventDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
log_flow_run_terminated_event.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/flowRuns/{flowRunId}/logTerminatedEvent'} # type: ignore
@distributed_trace
def update_service_logs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
body=None, # type: Optional["_models.ServiceLogRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.Task"
"""update_service_logs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param bulk_run_id:
:type bulk_run_id: str
:param body:
:type body: ~flow.models.ServiceLogRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Task, or the result of cls(response)
:rtype: ~flow.models.Task
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Task"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'ServiceLogRequest')
else:
_json = None
request = build_update_service_logs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
bulk_run_id=bulk_run_id,
content_type=content_type,
json=_json,
template_url=self.update_service_logs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Task', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_service_logs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/serviceLogs'} # type: ignore
@distributed_trace
def batch_update_service_logs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_run_id, # type: str
body=None, # type: Optional[List["_models.ServiceLogRequest"]]
**kwargs # type: Any
):
# type: (...) -> "_models.Task"
"""batch_update_service_logs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param bulk_run_id:
:type bulk_run_id: str
:param body:
:type body: list[~flow.models.ServiceLogRequest]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Task, or the result of cls(response)
:rtype: ~flow.models.Task
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Task"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, '[ServiceLogRequest]')
else:
_json = None
request = build_batch_update_service_logs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
bulk_run_id=bulk_run_id,
content_type=content_type,
json=_json,
template_url=self.batch_update_service_logs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Task', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
batch_update_service_logs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRunsAdmin/{flowId}/bulkRuns/{bulkRunId}/serviceLogs/batch'} # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flow_runs_admin_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flow_runs_admin_operations.py",
"repo_id": "promptflow",
"token_count": 18701
} | 22 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import re
class BulkRunURL:
"""Parser for a flow run URL."""
REGEX_PATTERN = ".*prompts/flow/([^/]+)/([^/]+)/bulktest/([^/]+).*"
RUN_URL_FORMAT = (
"https://ml.azure.com/prompts/flow/{}/{}/bulktest/{}/details?wsid="
"/subscriptions/{}/resourcegroups/{}/providers/Microsoft.MachineLearningServices/workspaces/{}"
)
def __init__(self, url: str):
if url:
match = re.match(self.REGEX_PATTERN, url)
if match:
self.experiment_id = match.group(1)
self.flow_id = match.group(2)
self.bulk_test_id = match.group(3)
else:
raise ValueError("Invalid flow run URL: {}".format(url))
@classmethod
def get_url(cls, experiment_id, flow_id, bulk_test_id, subscription_id, resource_group, workspace_name):
return cls.RUN_URL_FORMAT.format(
experiment_id, flow_id, bulk_test_id, subscription_id, resource_group, workspace_name
)
class BulkRunId:
"""Parser for a flow run ID."""
REGEX_PATTERN = "azureml://experiment/([^/]+)/flow/([^/]+)/bulktest/([^/]+)(/run/[^/]+)?"
RUN_ID_FORMAT = "azureml://experiment/{}/flow/{}/bulktest/{}"
def __init__(self, arm_id: str):
if arm_id:
match = re.match(self.REGEX_PATTERN, arm_id)
if match:
self.experiment_id = match.group(1)
self.flow_id = match.group(2)
self.bulk_test_id = match.group(3)
                if match.group(4):
                    self.run_id = match.group(4).split("/")[-1].strip()
                else:
                    self.run_id = None
else:
raise ValueError("Invalid flow run ID: {}".format(arm_id))
@classmethod
def get_url(cls, experiment_id, flow_id, bulk_test_id, *, run_id=None):
arm_id = cls.RUN_ID_FORMAT.format(experiment_id, flow_id, bulk_test_id)
if run_id:
arm_id += "/run/{}".format(run_id)
return arm_id
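# Illustrative usage (the IDs below are made-up placeholders, not real resources):
#
#   url = BulkRunURL.get_url("exp-1", "flow-1", "test-1", "sub-1", "rg-1", "ws-1")
#   parsed = BulkRunURL(url)
#   assert (parsed.experiment_id, parsed.flow_id, parsed.bulk_test_id) == ("exp-1", "flow-1", "test-1")
#
#   arm_id = BulkRunId.get_url("exp-1", "flow-1", "test-1", run_id="run-1")
#   assert BulkRunId(arm_id).run_id == "run-1"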
| promptflow/src/promptflow/promptflow/azure/_utils/_url_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_utils/_url_utils.py",
"repo_id": "promptflow",
"token_count": 1057
} | 23 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
from typing import Any, List, Mapping, Optional
from promptflow.batch._base_executor_proxy import APIBasedExecutorProxy
from promptflow.executor._result import AggregationResult
EXECUTOR_SERVICE_DLL = "Promptflow.dll"
class CSharpBaseExecutorProxy(APIBasedExecutorProxy):
"""Base class for csharp executor proxy for local and runtime."""
def __init__(
self,
*,
working_dir: Path = None,
enable_stream_output: bool = False,
):
super().__init__(working_dir=working_dir, enable_stream_output=enable_stream_output)
async def exec_aggregation_async(
self,
batch_inputs: Mapping[str, Any],
aggregation_inputs: Mapping[str, Any],
run_id: Optional[str] = None,
) -> AggregationResult:
# TODO: aggregation is not supported for now?
return AggregationResult({}, {}, {})
@classmethod
def _construct_service_startup_command(
cls,
port,
log_path,
error_file_path,
yaml_path: str = "flow.dag.yaml",
log_level: str = "Warning",
assembly_folder: str = ".",
) -> List[str]:
return [
"dotnet",
EXECUTOR_SERVICE_DLL,
"--execution_service",
"--port",
            str(port),  # the command list must contain strings; the port may arrive as an int
"--yaml_path",
yaml_path,
"--assembly_folder",
assembly_folder,
"--log_path",
log_path,
"--log_level",
log_level,
"--error_file_path",
error_file_path,
]
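# For illustration, with the default keyword arguments the assembled command resembles:
#   dotnet Promptflow.dll --execution_service --port <port> --yaml_path flow.dag.yaml \
#     --assembly_folder . --log_path <log_path> --log_level Warning --error_file_path <error_file_path>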
| promptflow/src/promptflow/promptflow/batch/_csharp_base_executor_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/batch/_csharp_base_executor_proxy.py",
"repo_id": "promptflow",
"token_count": 781
} | 24 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# isort: skip_file
# skip to avoid circular import
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
from promptflow._sdk.entities._connection import (
AzureContentSafetyConnection,
AzureOpenAIConnection,
CognitiveSearchConnection,
CustomConnection,
OpenAIConnection,
SerpConnection,
QdrantConnection,
FormRecognizerConnection,
)
from promptflow._sdk.entities._run import Run
from promptflow._core.tool import InputSetting, DynamicList
from promptflow._sdk.entities._flow import FlowContext
__all__ = [
# region Connection
"AzureContentSafetyConnection",
"AzureOpenAIConnection",
"OpenAIConnection",
"CustomConnection",
"CognitiveSearchConnection",
"SerpConnection",
"QdrantConnection",
"FormRecognizerConnection",
# endregion
# region Run
"Run",
# endregion
# region Tool
"InputSetting",
"DynamicList",
# endregion
# region Flow
"FlowContext",
# endregion
]
| promptflow/src/promptflow/promptflow/entities/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/entities/__init__.py",
"repo_id": "promptflow",
"token_count": 384
} | 25 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass
from datetime import datetime
from typing import List
from promptflow.contracts.run_info import RunInfo
@dataclass
class CacheRecord:
run_id: str
hash_id: str
flow_run_id: str
flow_id: str
cache_string: str
end_time: datetime
class AbstractCacheStorage:
    def get_cache_record_list(self, hash_id: str) -> List[CacheRecord]:
        pass
    def persist_cache_result(self, run_info: RunInfo):
        pass
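# A minimal in-memory implementation sketch (illustrative only, not part of the SDK;
# a real storage would also need to build CacheRecord instances from run_info):
#
# class InMemoryCacheStorage(AbstractCacheStorage):
#     def __init__(self):
#         self._records = {}  # hash_id -> List[CacheRecord]
#
#     def get_cache_record_list(self, hash_id: str) -> List[CacheRecord]:
#         return self._records.get(hash_id, [])
#
#     def persist_cache_result(self, run_info: RunInfo):
#         ...  # derive a CacheRecord from run_info and append it under its hash_id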
| promptflow/src/promptflow/promptflow/storage/_cache_storage.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/storage/_cache_storage.py",
"repo_id": "promptflow",
"token_count": 189
} | 26 |
FROM python:3.9-slim-bullseye AS base
RUN set -x
RUN apt-get update \
&& apt-get -y install curl \
&& apt-get -y install net-tools \
&& apt-get -y install procps \
&& apt-get -y install build-essential \
&& apt-get -y install docker.io
RUN pip install ipython ipykernel
RUN ipython kernel install --user --name promptflow
# FROM base AS promptflow
COPY requirements.txt .
RUN pip install -r requirements.txt
RUN set +x
CMD bash
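# Illustrative usage (the image tag is a placeholder): build and run this dev image
#   docker build -t promptflow-dev .
#   docker run -it promptflow-dev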
| promptflow/.devcontainer/Dockerfile/0 | {
"file_path": "promptflow/.devcontainer/Dockerfile",
"repo_id": "promptflow",
"token_count": 166
} | 0 |
# Support
## How to file issues and get help
This project uses GitHub Issues to track bugs and feature requests. Please search the existing
issues before filing new issues to avoid duplicates. For new issues, file your bug or
feature request as a new Issue.
## Microsoft Support Policy
Support for this project is limited to the resources listed above.
| promptflow/SUPPORT.md/0 | {
"file_path": "promptflow/SUPPORT.md",
"repo_id": "promptflow",
"token_count": 84
} | 1 |
# Concepts
In this section, you will learn the basic concepts of prompt flow.
```{toctree}
:maxdepth: 1
concept-flows
concept-tools
concept-connections
concept-variants
design-principles
``` | promptflow/docs/concepts/index.md/0 | {
"file_path": "promptflow/docs/concepts/index.md",
"repo_id": "promptflow",
"token_count": 61
} | 2 |
# Adding Category and Tags for Tool
This document guides you through categorizing and tagging your tools for better organization and efficiency. Categories organize your tools into specific folders, making it much easier to find what you need. Tags work like labels that offer more detailed descriptions, enabling you to quickly search and filter tools based on specific characteristics or functions. Together, categories and tags let you tailor your tool library to your preferences and quickly find the right tool for any task.
| Attribute | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| category | str | No | Organizes tools into folders by common features. |
| tags | dict | No | Offers detailed, searchable descriptions of tools through key-value pairs. |
**Important Notes:**
- Tools without an assigned category will be listed in the root folder.
- Tools lacking tags will display an empty tags field.
## Prerequisites
- Please ensure that your [Prompt flow for VS Code](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) is updated to version 1.1.0 or later.
## How to add category and tags for a tool
Run the command below in your tool project directory to automatically generate your tool YAML. Use _-c_ or _--category_ to add a category, and _--tags_ to add tags for your tool:
```
python <promptflow github repo>\scripts\tool\generate_package_tool_meta.py -m <tool_module> -o <tool_yaml_path> --category <tool_category> --tags <tool_tags>
```
Here, we use [an existing tool](https://github.com/microsoft/promptflow/tree/main/examples/tools/tool-package-quickstart/my_tool_package/yamls/my_tool_1.yaml) as an example. If you wish to create your own tool, please refer to the [create and use tool package](create-and-use-tool-package.md#create-custom-tool-package) guide.
```
cd D:\proj\github\promptflow\examples\tools\tool-package-quickstart
python D:\proj\github\promptflow\scripts\tool\generate_package_tool_meta.py -m my_tool_package.tools.my_tool_1 -o my_tool_package\yamls\my_tool_1.yaml --category "test_tool" --tags "{'tag1':'value1','tag2':'value2'}"
```
In the auto-generated tool YAML file, the category and tags are shown as below:
```yaml
my_tool_package.tools.my_tool_1.my_tool:
function: my_tool
inputs:
connection:
type:
- CustomConnection
input_text:
type:
- string
module: my_tool_package.tools.my_tool_1
name: My First Tool
description: This is my first tool
type: python
# Category and tags are shown as below.
category: test_tool
tags:
tag1: value1
tag2: value2
```
## Tool with category and tags experience in VS Code extension
Follow the [steps](create-and-use-tool-package.md#use-your-tool-from-vscode-extension) to use your tool via the VS Code extension.
- Experience in the tool tree
![category_and_tags_in_tool_tree](../../media/how-to-guides/develop-a-tool/category_and_tags_in_tool_tree.png)
- Experience in the tool list
By clicking `More` in the visual editor, you can view your tools along with their category and tags:
![category_and_tags_in_tool_list](../../media/how-to-guides/develop-a-tool/category_and_tags_in_tool_list.png)
Furthermore, you have the option to search or filter tools based on tags:
![filter_tools_by_tag](../../media/how-to-guides/develop-a-tool/filter_tools_by_tag.png) | promptflow/docs/how-to-guides/develop-a-tool/add-category-and-tags-for-tool.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-tool/add-category-and-tags-for-tool.md",
"repo_id": "promptflow",
"token_count": 1073
} | 3 |
# Quick Start
This guide will walk you through the first steps of using the prompt flow code-first experience.
**Prerequisites** - To make the most of this tutorial, you'll need:
- Know how to program with Python :)
- A basic understanding of Machine Learning can be beneficial, but it's not mandatory.
**Learning Objectives** - Upon completing this tutorial, you should learn how to:
- Setup your python environment to run prompt flow
- Clone a sample flow & understand what's a flow
- Understand how to edit the flow using visual editor or yaml
- Test the flow using your favorite experience: CLI, SDK or VS Code Extension.
## Set up your dev environment
1. A python environment with version `python=3.9` or a higher version like 3.10. It's recommended to use the python environment manager [miniconda](https://docs.conda.io/en/latest/miniconda.html). After you have installed miniconda, run the commands below to create a python environment:
```bash
conda create --name pf python=3.9
conda activate pf
```
2. Install `promptflow` and `promptflow-tools`.
```sh
pip install promptflow promptflow-tools
```
3. Check the installation.
```bash
# should print promptflow version, e.g. "0.1.0b3"
pf -v
```
## Understand what's a flow
A flow, represented as a YAML file, is a DAG of functions that are connected via input/output dependencies and executed based on the topology by the prompt flow executor. See [Flows](../../concepts/concept-flows.md) for more details.
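For orientation, a minimal flow definition looks roughly like this (an illustrative sketch, not the sample flow itself; the node and file names are placeholders):
```yaml
inputs:
  url:
    type: string
nodes:
- name: classify
  type: python
  source:
    type: code
    path: classify.py
  inputs:
    text: ${inputs.url}
outputs:
  category:
    type: string
    reference: ${classify.output}
```
Each node consumes flow inputs or upstream node outputs via the `${...}` reference syntax, which is what defines the DAG's edges.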
### Get the flow sample
Clone the sample repo and check flows in folder [examples/flows](https://github.com/microsoft/promptflow/tree/main/examples/flows).
```bash
git clone https://github.com/microsoft/promptflow.git
```
### Understand flow directory
The sample used in this tutorial is the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification) flow, which categorizes URLs into several predefined classes. Classification is a traditional machine learning task, and this sample illustrates how to perform classification using GPT and prompts.
```bash
cd promptflow/examples/flows/standard/web-classification
```
A flow directory contains all the contents of a flow. Structure of the flow folder:
- **flow.dag.yaml**: The flow definition with inputs/outputs, nodes, tools and variants for authoring purposes.
- **.promptflow/flow.tools.json**: It contains tools meta referenced in `flow.dag.yaml`.
- **Source code files (.py, .jinja2)**: User managed, the code scripts referenced by tools.
- **requirements.txt**: Python package dependencies for this flow.
![flow_dir](../media/how-to-guides/quick-start/flow_directory.png)
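For reference, the sample's folder looks roughly like this (an illustrative sketch; the exact file names in the sample may differ):
```
web-classification/
├── flow.dag.yaml
├── .promptflow/
│   └── flow.tools.json
├── fetch_text_content_from_url.py
├── summarize_text_content.jinja2
└── requirements.txt
```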
In order to run this specific flow, you need to install its requirements first.
```sh
pip install -r requirements.txt
```
### Understand the flow yaml
The entry file of a flow directory is [`flow.dag.yaml`](https://github.com/microsoft/promptflow/blob/main/examples/flows/standard/web-classification/flow.dag.yaml), which describes the `DAG (Directed Acyclic Graph)` of a flow. Below is a sample flow DAG:
![flow_dag](../media/how-to-guides/quick-start/flow_dag.png)
This graph is rendered by VS Code extension which will be introduced in the next section.
### Using VS Code Extension to visualize the flow
_Note: The Prompt flow VS Code extension is highly recommended for flow development and debugging._
1. Prerequisites for VS Code extension.
- Install latest stable version of [VS Code](https://code.visualstudio.com/)
- Install [VS Code Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python)
2. Install [Prompt flow for VS Code extension](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow)
3. Select python interpreter
![vscode](../media/how-to-guides/quick-start/vs_code_interpreter_0.png)
![vscode](../media/how-to-guides/quick-start/vs_code_interpreter_1.png)
4. Open the DAG in VS Code. You can open the `flow.dag.yaml` as a yaml file, or you can also open it in the `visual editor`.
![vscode](../media/how-to-guides/quick-start/vs_code_dag_0.png)
## Develop and test your flow
### How to edit the flow
To test your flow with varying input data, you can modify the default input. If you are well-versed in the structure, you may also add or remove nodes to alter the flow's arrangement.
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
url:
type: string
# change the default value of input url here
default: https://play.google.com/store/apps/details?id=com.twitter.android
...
```
See more details of this topic in [Develop a flow](./develop-a-flow/index.md).
### Create necessary connections
:::{note}
If you are using `WSL` or another OS without a default keyring storage backend, you may encounter `StoreConnectionEncryptionKeyError`; please refer to [FAQ](./faq.md#connection-creation-failed-with-storeconnectionencryptionkeyerror) for the solutions.
:::
The [`connection`](../concepts/concept-connections.md) helps securely store and manage secret keys or other sensitive credentials required for interacting with LLM and other external tools for example Azure Content Safety.
The sample flow [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification) uses connection `open_ai_connection` inside, e.g. `classify_with_llm` node needs to talk to `llm` using the connection.
We need to set up the connection if we haven't added it before. Once created, the connection will be stored in the local db and can be used in any flow.
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
First, we need a connection yaml file, `connection.yaml`:
If you are using Azure Open AI, prepare your resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/AzureOpenAIConnection.schema.json
name: open_ai_connection
type: azure_open_ai
api_key: <test_key>
api_base: <test_base>
api_type: azure
api_version: <test_version>
```
If you are using OpenAI, sign up for an account via the [OpenAI website](https://openai.com/), log in and [find your personal API key](https://platform.openai.com/account/api-keys), then use this yaml:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
name: open_ai_connection
type: open_ai
api_key: "<user-input>"
organization: "" # optional
```
Then we can use CLI command to create the connection.
```sh
pf connection create -f connection.yaml
```
More command details can be found in [CLI reference](../reference/pf-command-reference.md).
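Once created, you can verify the connection from the CLI (a quick sketch; the exact output format may vary by version):
```sh
pf connection list
pf connection show --name open_ai_connection
```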
:::
:::{tab-item} SDK
:sync: SDK
In SDK, connections can be created and managed with `PFClient`.
```python
from promptflow import PFClient
from promptflow.entities import AzureOpenAIConnection
# PFClient can help manage your runs and connections.
pf = PFClient()
try:
conn_name = "open_ai_connection"
conn = pf.connections.get(name=conn_name)
print("using existing connection")
except Exception:
connection = AzureOpenAIConnection(
name=conn_name,
api_key="<test_key>",
api_base="<test_base>",
api_type="azure",
api_version="<test_version>",
)
# use this if you have an existing OpenAI account
# from promptflow.entities import OpenAIConnection
# connection = OpenAIConnection(
# name=conn_name,
# api_key="<user-input>",
# )
conn = pf.connections.create_or_update(connection)
print("successfully created connection")
print(conn)
```
:::
:::{tab-item} VS Code Extension
:sync: VS Code Extension
1. Click the promptflow icon to enter promptflow control panel
![vsc_add_connection](../media/how-to-guides/quick-start/vs_code_connection_0.png)
2. Create your connection.
![vsc_add_connection](../media/how-to-guides/quick-start/vs_code_connection_1.png)
![vsc_add_connection](../media/how-to-guides/quick-start/vs_code_connection_2.png)
![vsc_add_connection](../media/how-to-guides/quick-start/vs_code_connection_3.png)
:::
::::
Learn about more actions, like deleting a connection, in [Manage connections](./manage-connections.md).
### Test the flow
:::{admonition} Note
Testing a flow will NOT create a batch run record, so you can't use commands like `pf run show-details` to get the run information. If you want to persist the run record, see [Run and evaluate a flow](./run-and-evaluate-a-flow/index.md)
:::
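If you do want a persisted run record, create a batch run instead of a test (a minimal sketch, assuming the sample's bundled `data.jsonl`):
```sh
pf run create --flow web-classification --data web-classification/data.jsonl --stream
```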
Assuming you are in the working directory `promptflow/examples/flows/standard/`
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
Change the default input to the value you want to test.
![q_0](../media/how-to-guides/quick-start/flow-directory-and-dag-yaml.png)
```sh
pf flow test --flow web-classification # "web-classification" is the directory name
```
![flow-test-output-cli](../media/how-to-guides/quick-start/flow-test-output-cli.png)
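You can also test a single node from the CLI; a sketch assuming the node and input names from this sample flow:
```sh
pf flow test --flow web-classification --node fetch_text_content_from_url --inputs url=https://www.youtube.com/watch?v=o5ZQyXaAv1g
```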
:::
:::{tab-item} SDK
:sync: SDK
The return value of the `test` function is the flow/node outputs.
```python
from promptflow import PFClient
pf = PFClient()
flow_path = "web-classification" # "web-classification" is the directory name
# Test flow
flow_inputs = {"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"} # The inputs of the flow.
flow_result = pf.test(flow=flow_path, inputs=flow_inputs)
print(f"Flow outputs: {flow_result}")
# Test node in the flow
node_name = "fetch_text_content_from_url" # The node name in the flow.
node_inputs = {"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g"} # The inputs of the node.
node_result = pf.test(flow=flow_path, inputs=node_inputs, node=node_name)
print(f"Node outputs: {node_result}")
```
![Flow test outputs](../media/how-to-guides/quick-start/flow_test_output.png)
:::
:::{tab-item} VS Code Extension
:sync: VS Code Extension
Use the code lens action at the top of the YAML editor to trigger a flow test.
![dag_yaml_flow_test](../media/how-to-guides/quick-start/test_flow_dag_yaml.gif)
Click the run flow button at the top of the visual editor to trigger a flow test.
![visual_editor_flow_test](../media/how-to-guides/quick-start/test_flow_dag_editor.gif)
:::
::::
See more details on this topic in [Initialize and test a flow](./init-and-test-a-flow.md).
## Next steps
Learn more on how to:
- [Develop a flow](./develop-a-flow/index.md): details on how to develop a flow by writing a flow yaml from scratch.
- [Initialize and test a flow](./init-and-test-a-flow.md): details on how to develop a flow from scratch or from existing code.
- [Add conditional control to a flow](./add-conditional-control-to-a-flow.md): how to use activate config to add conditional control to a flow.
- [Run and evaluate a flow](./run-and-evaluate-a-flow/index.md): run and evaluate a flow using a multi-line data file.
- [Deploy a flow](./deploy-a-flow/index.md): how to deploy the flow as a web app.
- [Manage connections](./manage-connections.md): how to manage the endpoints/secrets information to access external services including LLMs.
- [Prompt flow in Azure AI](../cloud/azureai/quick-start/index.md): run and evaluate flows in Azure AI, where you can collaborate with your team more effectively.
And you can also check our [examples](https://github.com/microsoft/promptflow/tree/main/examples), especially:
- [Getting started with prompt flow](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/get-started/quickstart.ipynb): the notebook covering the Python SDK experience for the sample introduced in this doc.
- [Tutorial: Chat with PDF](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md): an end-to-end tutorial on how to build a high-quality chat application with prompt flow, including flow development and evaluation with metrics.
| promptflow/docs/how-to-guides/quick-start.md/0 | {
"file_path": "promptflow/docs/how-to-guides/quick-start.md",
"repo_id": "promptflow",
"token_count": 3766
} | 4 |
# Content Safety (Text)
Azure Content Safety is a content moderation service developed by Microsoft that helps users detect harmful content across different modalities and languages. This tool is a wrapper for the Azure Content Safety Text API, which allows you to analyze text content and get moderation results. See the [Azure Content Safety](https://aka.ms/acs-doc) documentation for more information.
## Requirements
- For AzureML users, the tool is installed in default image, you can use the tool without extra installation.
- For local users, install the package: `pip install promptflow-tools`
> [!NOTE]
> Content Safety (Text) tool is now incorporated into the latest `promptflow-tools` package. If you have previously installed the package `promptflow-contentsafety`, please uninstall it to avoid duplication in your local tool list.
## Prerequisites
- Create an [Azure Content Safety](https://aka.ms/acs-create) resource.
- Add "Azure Content Safety" connection in prompt flow. Fill "API key" field with "Primary key" from "Keys and Endpoint" section of created resource.
## Inputs
You can use the following parameters as inputs for this tool:
| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| text | string | The text that needs to be moderated. | Yes |
| hate_category | string | The moderation sensitivity for Hate category. You can choose from four options: *disable*, *low_sensitivity*, *medium_sensitivity*, or *high_sensitivity*. The *disable* option means no moderation for hate category. The other three options mean different degrees of strictness in filtering out hate content. The default option is *medium_sensitivity*. | Yes |
| sexual_category | string | The moderation sensitivity for Sexual category. You can choose from four options: *disable*, *low_sensitivity*, *medium_sensitivity*, or *high_sensitivity*. The *disable* option means no moderation for sexual category. The other three options mean different degrees of strictness in filtering out sexual content. The default option is *medium_sensitivity*. | Yes |
| self_harm_category | string | The moderation sensitivity for Self-harm category. You can choose from four options: *disable*, *low_sensitivity*, *medium_sensitivity*, or *high_sensitivity*. The *disable* option means no moderation for self-harm category. The other three options mean different degrees of strictness in filtering out self-harm content. The default option is *medium_sensitivity*. | Yes |
| violence_category | string | The moderation sensitivity for Violence category. You can choose from four options: *disable*, *low_sensitivity*, *medium_sensitivity*, or *high_sensitivity*. The *disable* option means no moderation for violence category. The other three options mean different degrees of strictness in filtering out violence content. The default option is *medium_sensitivity*. | Yes |
For more information, please refer to [Azure Content Safety](https://aka.ms/acs-doc).
## Outputs
The following is an example JSON format response returned by the tool:
<details>
<summary>Output</summary>
```json
{
"action_by_category": {
"Hate": "Accept",
"SelfHarm": "Accept",
"Sexual": "Accept",
"Violence": "Accept"
},
"suggested_action": "Accept"
}
```
</details>
The `action_by_category` field gives you a binary value for each category: *Accept* or *Reject*. This value shows if the text meets the sensitivity level that you set in the request parameters for that category.
The `suggested_action` field gives you an overall recommendation based on the four categories. If any category has a *Reject* value, the `suggested_action` will be *Reject* as well.
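In a flow, a downstream node can branch on these fields. A minimal sketch of such a node (the field names below assume the JSON output shown above):
```python
from promptflow import tool

@tool
def guard(moderation_result: dict) -> str:
    # Block the text if the overall suggestion is "Reject".
    if moderation_result["suggested_action"] == "Reject":
        return "Content blocked by Azure Content Safety."
    return "Content accepted."
```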
| promptflow/docs/reference/tools-reference/contentsafety_text_tool.md/0 | {
"file_path": "promptflow/docs/reference/tools-reference/contentsafety_text_tool.md",
"repo_id": "promptflow",
"token_count": 917
} | 5 |
promptflow.tools.azure_content_safety.analyze_text:
module: promptflow.tools.azure_content_safety
function: analyze_text
inputs:
connection:
type:
- AzureContentSafetyConnection
hate_category:
default: medium_sensitivity
enum:
- disable
- low_sensitivity
- medium_sensitivity
- high_sensitivity
type:
- string
self_harm_category:
default: medium_sensitivity
enum:
- disable
- low_sensitivity
- medium_sensitivity
- high_sensitivity
type:
- string
sexual_category:
default: medium_sensitivity
enum:
- disable
- low_sensitivity
- medium_sensitivity
- high_sensitivity
type:
- string
text:
type:
- string
violence_category:
default: medium_sensitivity
enum:
- disable
- low_sensitivity
- medium_sensitivity
- high_sensitivity
type:
- string
name: Content Safety (Text Analyze)
description: Use Azure Content Safety to detect harmful content.
type: python
deprecated_tools:
- content_safety_text.tools.content_safety_text_tool.analyze_text
| promptflow/src/promptflow-tools/promptflow/tools/yamls/azure_content_safety.yaml/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/yamls/azure_content_safety.yaml",
"repo_id": "promptflow",
"token_count": 492
} | 6 |
@echo off
setlocal
SET PF_INSTALLER=PIP
IF EXIST "%~dp0\python.exe" (
"%~dp0\python.exe" -m promptflow._cli._pf.entry %*
) ELSE (
python -m promptflow._cli._pf.entry %*
)
| promptflow/src/promptflow/pf.bat/0 | {
"file_path": "promptflow/src/promptflow/pf.bat",
"repo_id": "promptflow",
"token_count": 80
} | 7 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
from dotenv import dotenv_values
from promptflow._cli._params import add_param_connection_name, add_param_env, base_params
from promptflow._cli._utils import _set_workspace_argument_for_subparsers, activate_action, get_client_for_cli
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.connections import CustomConnection
from promptflow.contracts.types import Secret
logger = get_cli_sdk_logger()
def add_connection_parser(subparsers):
connection_parser = subparsers.add_parser(
"connection", description="A CLI tool to manage connections for promptflow.", help="pf connection"
)
subparsers = connection_parser.add_subparsers()
add_connection_create(subparsers)
add_connection_get(subparsers)
connection_parser.set_defaults(action="connection")
def add_connection_create(subparsers):
add_param_type = lambda parser: parser.add_argument( # noqa: E731
"--type",
type=str,
help='Type of the connection. Possible values include: "OpenAI", "AzureOpenAI", "Serp", "Bing", '
'"Custom", "AzureContentSafety", "CognitiveSearch", "SubstrateLLM".',
)
add_params = [
_set_workspace_argument_for_subparsers,
add_param_connection_name,
add_param_type,
add_param_env,
] + base_params
activate_action(
name="create",
description="Create a connection for promptflow.",
epilog=None,
add_params=add_params,
subparsers=subparsers,
help_message="pf connection create",
action_param_name="sub_action",
)
def add_connection_get(subparsers):
add_params = [
_set_workspace_argument_for_subparsers,
add_param_connection_name,
add_param_env,
] + base_params
activate_action(
name="get",
description="Get a connection for promptflow.",
epilog=None,
add_params=add_params,
subparsers=subparsers,
help_message="pf connection get",
action_param_name="sub_action",
)
def _get_conn_operations(subscription_id, resource_group, workspace_name):
from promptflow.azure import PFClient
client = get_client_for_cli(
subscription_id=subscription_id, workspace_name=workspace_name, resource_group_name=resource_group
)
pf = PFClient(ml_client=client)
return pf._connections
def create_conn(name, type, env, subscription_id, resource_group, workspace_name):
from promptflow._sdk.entities._connection import _Connection
if not Path(env).exists():
raise ValueError(f"Env file {env} does not exist.")
try:
dot_env = dotenv_values(env)
except Exception as e:
raise ValueError(f"Failed to load env file {env}. Error: {e}")
custom_configs = CustomConnection(**{k: Secret(v) for k, v in dot_env.items()})
connection = _Connection(name=name, type=type, custom_configs=custom_configs, connection_scope="WorkspaceShared")
conn_ops = _get_conn_operations(subscription_id, resource_group, workspace_name)
result = conn_ops.create_or_update(connection=connection)
print(result._to_yaml())
def get_conn(name, subscription_id, resource_group, workspace_name):
conn_ops = _get_conn_operations(subscription_id, resource_group, workspace_name)
result = conn_ops.get(name=name)
print(result._to_yaml())
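# Example .env file consumed by `create_conn` above (illustrative values):
#   API_KEY=<your-api-key>
#   API_BASE=https://<your-resource>.openai.azure.com/
# Each key-value pair is wrapped as a Secret config on the resulting CustomConnection.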
| promptflow/src/promptflow/promptflow/_cli/_pf_azure/_connection.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf_azure/_connection.py",
"repo_id": "promptflow",
"token_count": 1313
} | 8 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow import tool
# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need
@tool
def my_python_tool(input1: str) -> str:
return "Prompt: " + input1
| promptflow/src/promptflow/promptflow/_cli/data/standard_flow/hello.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/standard_flow/hello.py",
"repo_id": "promptflow",
"token_count": 114
} | 9 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import inspect
import logging
from abc import ABC
from dataclasses import InitVar, asdict, dataclass, field
from enum import Enum
from typing import Callable, Dict, List, Optional, Union
from promptflow._core.tracer import _traced
from promptflow.contracts.trace import TraceType
module_logger = logging.getLogger(__name__)
STREAMING_OPTION_PARAMETER_ATTR = "_streaming_option_parameter"
# copied from promptflow.contracts.tool import ToolType
class ToolType(str, Enum):
LLM = "llm"
PYTHON = "python"
CSHARP = "csharp"
PROMPT = "prompt"
_ACTION = "action"
CUSTOM_LLM = "custom_llm"
class ToolInvoker(ABC):
_active_tool_invoker: Optional["ToolInvoker"] = None
def invoke_tool(self, f, *args, **kwargs):
raise NotImplementedError()
@classmethod
def activate(cls, tool_invoker: "ToolInvoker"):
cls._active_tool_invoker = tool_invoker
@classmethod
def deactivate(cls):
cls._active_tool_invoker = None
@classmethod
def active_instance(cls) -> Optional["ToolInvoker"]:
return cls._active_tool_invoker
def tool(
func=None,
*,
name: str = None,
description: str = None,
type: str = None,
input_settings=None,
streaming_option_parameter: Optional[str] = None,
**kwargs,
) -> Callable:
"""Decorator for tool functions. The decorated function will be registered as a tool and can be used in a flow.
:param name: The tool name.
:type name: str
:param description: The tool description.
:type description: str
:param type: The tool type.
:type type: str
:param input_settings: Dict of input setting.
:type input_settings: Dict[str, promptflow.entities.InputSetting]
:return: The decorated function.
:rtype: Callable
"""
def tool_decorator(func: Callable) -> Callable:
from promptflow.exceptions import UserErrorException
if type is not None and type not in [k.value for k in ToolType]:
raise UserErrorException(f"Tool type {type} is not supported yet.")
# Calls to tool functions should be traced automatically.
new_f = _traced(func, trace_type=TraceType.TOOL)
new_f.__tool = None # This will be set when generating the tool definition.
new_f.__name = name
new_f.__description = description
new_f.__type = type
new_f.__input_settings = input_settings
new_f.__extra_info = kwargs
if streaming_option_parameter and isinstance(streaming_option_parameter, str):
setattr(new_f, STREAMING_OPTION_PARAMETER_ATTR, streaming_option_parameter)
return new_f
# enable using the decorator without "()" when all arguments have default values
if func is not None:
return tool_decorator(func)
return tool_decorator
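# Example usage of the decorator (an illustrative sketch; both forms are supported):
#
#   @tool
#   def echo(text: str) -> str:
#       return text
#
#   @tool(name="Echo", description="Echo the input text.", type="python")
#   def echo_with_metadata(text: str) -> str:
#       return text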
def parse_all_args(argnames, args, kwargs) -> dict:
"""Parse args + kwargs to kwargs."""
all_args = {name: value for name, value in zip(argnames, args)}
all_args.update(kwargs)
return all_args
class ToolProvider(ABC):
"""The base class of tool class."""
_initialize_inputs = None
_required_initialize_inputs = None
_instance_init_params = None
def __new__(cls, *args, **kwargs):
# Record the init parameters, use __new__ so that user doesn't need to
# repeat parameters when calling super().__init__()
cls._instance_init_params = parse_all_args(cls.get_initialize_inputs().keys(), args, kwargs)
return super(ToolProvider, cls).__new__(cls)
def __init__(self):
"""
Define the base inputs of each tool.
All the parameters of __init__ will be added to inputs of each @tool in the class.
"""
self._init_params = self._instance_init_params
@classmethod
def get_initialize_inputs(cls):
if not cls._initialize_inputs:
cls._initialize_inputs = {
k: v for k, v in inspect.signature(cls.__init__).parameters.items() if k != "self"
}
return cls._initialize_inputs
@classmethod
def get_required_initialize_inputs(cls):
if not cls._required_initialize_inputs:
cls._required_initialize_inputs = {
k: v
for k, v in inspect.signature(cls.__init__).parameters.items()
if k != "self" and v.default is inspect.Parameter.empty
}
return cls._required_initialize_inputs
@dataclass
class DynamicList:
function: InitVar[Union[str, Callable]]
"""The dynamic list function."""
input_mapping: InitVar[Dict] = None
"""The mapping between dynamic list function inputs and tool inputs."""
func_path: str = field(init=False)
func_kwargs: List = field(init=False)
def __post_init__(self, function, input_mapping):
from promptflow._sdk._constants import SKIP_FUNC_PARAMS
from promptflow._utils.tool_utils import _get_function_path, function_to_interface
self._func_obj, self.func_path = _get_function_path(function)
self._input_mapping = input_mapping or {}
dynamic_list_func_inputs, _, _, _ = function_to_interface(
self._func_obj, gen_custom_type_conn=True, skip_prompt_template=True
)
# Get function input info
self.func_kwargs = []
inputs = inspect.signature(self._func_obj).parameters
for name, value in dynamic_list_func_inputs.items():
if name not in SKIP_FUNC_PARAMS:
input_info = {"name": name}
input_info.update(asdict(value, dict_factory=lambda x: {k: v for (k, v) in x if v}))
if name in self._input_mapping:
input_info["reference"] = f"${{inputs.{self._input_mapping[name]}}}"
input_info["optional"] = inputs[name].default is not inspect.Parameter.empty
if input_info["optional"]:
input_info["default"] = inputs[name].default
self.func_kwargs.append(input_info)
@dataclass
class GeneratedBy:
"""Settings of the generated by"""
function: InitVar[Union[str, Callable]]
"""The generated by function."""
reverse_function: InitVar[Union[str, Callable]]
"""The reverse generated by function."""
input_settings: InitVar[Dict[str, object]] = None
"""The input settings of generated by function."""
func_path: str = field(init=False)
func_kwargs: List = field(init=False)
reverse_func_path: str = field(init=False)
def __post_init__(self, function, reverse_function, input_settings):
from promptflow._sdk._constants import SKIP_FUNC_PARAMS, UIONLY_HIDDEN
from promptflow._utils.tool_utils import _get_function_path, function_to_interface
self._func_obj, self.func_path = _get_function_path(function=function)
self._reverse_func_obj, self.reverse_func_path = _get_function_path(function=reverse_function)
self._input_settings = {}
generated_func_inputs, _, _, _ = function_to_interface(
self._func_obj, gen_custom_type_conn=True, skip_prompt_template=True
)
# Get function input info
self.func_kwargs = []
func_inputs = inspect.signature(self._func_obj).parameters
for name, value in generated_func_inputs.items():
if name not in SKIP_FUNC_PARAMS:
# Update kwargs in generated_by settings
input_info = {"name": name}
input_info.update(asdict(value, dict_factory=lambda x: {k: v for (k, v) in x if v}))
input_info["reference"] = f"${{inputs.{name}}}"
input_info["optional"] = func_inputs[name].default is not inspect.Parameter.empty
self.func_kwargs.append(input_info)
# Generate generated_by input settings in the tool func
if name in input_settings:
self._input_settings[name] = asdict(
input_settings[name], dict_factory=lambda x: {k: v for (k, v) in x if v}
)
if "type" in input_info:
self._input_settings[name]["type"] = input_info["type"]
self._input_settings[name]["input_type"] = UIONLY_HIDDEN
@dataclass(init=False)
class InputSetting:
"""Settings of the tool input"""
is_multi_select: bool = None
"""Allow user to select multiple values."""
allow_manual_entry: bool = None
"""Allow user to enter input value manually."""
enabled_by: str = None
"""The input field which must be an enum type, that controls the visibility of the dependent input field."""
enabled_by_value: List = None
"""Defines the accepted enum values from the enabled_by field that will make this dependent input field visible."""
dynamic_list: DynamicList = None
"""Settings of dynamic list function."""
generated_by: GeneratedBy = None
"""Settings of generated by function."""
def __init__(self, **kwargs):
self.is_multi_select = kwargs.pop("is_multi_select", None)
self.allow_manual_entry = kwargs.pop("allow_manual_entry", None)
self.enabled_by = kwargs.pop("enabled_by", None)
self.enabled_by_value = kwargs.pop("enabled_by_value", None)
self.dynamic_list = kwargs.pop("dynamic_list", None)
self.generated_by = kwargs.pop("generated_by", None)
self._kwargs = kwargs
| promptflow/src/promptflow/promptflow/_core/tool.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/tool.py",
"repo_id": "promptflow",
"token_count": 3844
} | 10 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import time
from functools import partial, wraps
from typing import Tuple, Union
from sqlalchemy.exc import OperationalError
def retry(exception_to_check: Union[Exception, Tuple[Exception]], tries=4, delay=3, backoff=2, logger=None):
"""
Retry calling the decorated function using an exponential backoff.
From https://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param exception_to_check: the exception to check. may be a tuple of
exceptions to check
:type exception_to_check: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: log the retry action if specified
:type logger: logging.Logger
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
retry_times, delay_seconds = tries, delay
while retry_times > 1:
try:
if logger:
logger.info("Running %s, %d more tries to go.", str(f), retry_times)
return f(*args, **kwargs)
except exception_to_check:
time.sleep(delay_seconds)
retry_times -= 1
delay_seconds *= backoff
if logger:
logger.warning("%s, Retrying in %d seconds...", str(exception_to_check), delay_seconds)
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
sqlite_retry = partial(retry, exception_to_check=OperationalError, tries=3, delay=0.5, backoff=1)()
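# Example usage (illustrative):
#
#   @retry(ConnectionError, tries=3, delay=1, backoff=2)
#   def flaky_call():
#       ...
#
#   @sqlite_retry
#   def write_to_db():
#       ...  # retried on sqlalchemy OperationalError, e.g. when the db is locked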
| promptflow/src/promptflow/promptflow/_sdk/_orm/retry.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_orm/retry.py",
"repo_id": "promptflow",
"token_count": 868
} | 11 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
from logging.handlers import RotatingFileHandler
from flask import Blueprint, Flask, jsonify
from werkzeug.exceptions import HTTPException
from promptflow._sdk._constants import HOME_PROMPT_FLOW_DIR, PF_SERVICE_LOG_FILE
from promptflow._sdk._service import Api
from promptflow._sdk._service.apis.collector import trace_collector
from promptflow._sdk._service.apis.connection import api as connection_api
from promptflow._sdk._service.apis.run import api as run_api
from promptflow._sdk._service.apis.span import api as span_api
from promptflow._sdk._service.apis.telemetry import api as telemetry_api
from promptflow._sdk._service.apis.ui import api as ui_api
from promptflow._sdk._service.utils.utils import FormattedException
from promptflow._sdk._utils import get_promptflow_sdk_version, read_write_by_user
def heartbeat():
response = {"promptflow": get_promptflow_sdk_version()}
return jsonify(response)
def create_app():
app = Flask(__name__)
app.add_url_rule("/heartbeat", view_func=heartbeat)
app.add_url_rule("/v1/traces", view_func=trace_collector, methods=["POST"])
with app.app_context():
api_v1 = Blueprint("Prompt Flow Service", __name__, url_prefix="/v1.0")
# Registers resources from namespace for current instance of api
api = Api(api_v1, title="Prompt Flow Service", version="1.0")
api.add_namespace(connection_api)
api.add_namespace(run_api)
api.add_namespace(telemetry_api)
api.add_namespace(span_api)
api.add_namespace(ui_api)
app.register_blueprint(api_v1)
# Disable flask-restx set X-Fields in header. https://flask-restx.readthedocs.io/en/latest/mask.html#usage
app.config["RESTX_MASK_SWAGGER"] = False
# Enable log
app.logger.setLevel(logging.INFO)
log_file = HOME_PROMPT_FLOW_DIR / PF_SERVICE_LOG_FILE
log_file.touch(mode=read_write_by_user(), exist_ok=True)
# Create a rotating file handler with a max size of 1 MB, keeping up to 1 backup file
handler = RotatingFileHandler(filename=log_file, maxBytes=1_000_000, backupCount=1)
formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] - %(message)s")
handler.setFormatter(formatter)
app.logger.addHandler(handler)
# Basic error handler
@api.errorhandler(Exception)
def handle_exception(e):
"""When any error occurs on the server, return a formatted error message."""
from dataclasses import asdict
if isinstance(e, HTTPException):
return asdict(FormattedException(e), dict_factory=lambda x: {k: v for (k, v) in x if v}), e.code
app.logger.error(e, exc_info=True, stack_info=True)
formatted_exception = FormattedException(e)
return (
asdict(formatted_exception, dict_factory=lambda x: {k: v for (k, v) in x if v}),
formatted_exception.status_code,
)
return app, api
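# Example (illustrative; the actual service entry point configures host/port elsewhere):
#
#   app, api = create_app()
#   app.run(host="127.0.0.1", port=5000)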
| promptflow/src/promptflow/promptflow/_sdk/_service/app.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_service/app.py",
"repo_id": "promptflow",
"token_count": 1233
} | 12 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import os
from abc import ABC, abstractmethod
from pathlib import Path
from promptflow._constants import DEFAULT_ENCODING
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._serving.blueprint.monitor_blueprint import construct_monitor_blueprint
from promptflow._sdk._serving.blueprint.static_web_blueprint import construct_staticweb_blueprint
from promptflow._sdk._serving.monitor.flow_monitor import FlowMonitor
from promptflow._utils.yaml_utils import load_yaml
from promptflow._version import VERSION
from promptflow.contracts.flow import Flow
USER_AGENT = f"promptflow-local-serving/{VERSION}"
DEFAULT_STATIC_PATH = Path(__file__).parent.parent / "static"
class AppExtension(ABC):
def __init__(self, logger, **kwargs):
self.logger = logger
@abstractmethod
def get_flow_project_path(self) -> str:
"""Get flow project path."""
pass
@abstractmethod
def get_flow_name(self) -> str:
"""Get flow name."""
pass
@abstractmethod
def get_connection_provider(self) -> str:
"""Get connection provider."""
pass
@abstractmethod
def get_blueprints(self):
"""Get blueprints for current extension."""
pass
def get_override_connections(self, flow: Flow) -> (dict, dict):
"""
Get override connections for current extension.
:param flow: The flow to execute.
:type flow: ~promptflow._sdk.entities._flow.Flow
:return: The override connections, first dict is for connection data override, second dict is for connection name override. # noqa: E501
:rtype: (dict, dict)
"""
return {}, {}
def raise_ex_on_invoker_initialization_failure(self, ex: Exception):
"""
Whether to raise an exception when initializing the flow invoker failed.
:param ex: The exception when initializing flow invoker.
:type ex: Exception
:return: Whether to raise an exception when initializing the flow invoker failed.
"""
return True
def get_user_agent(self) -> str:
"""Get user agent used for current extension."""
return USER_AGENT
def get_credential(self):
"""Get credential for current extension."""
return None
def get_metrics_common_dimensions(self):
"""Get common dimensions for metrics if exist."""
return self._get_common_dimensions_from_env()
def get_flow_monitor(self) -> FlowMonitor:
"""Get flow monitor for current extension."""
# default no data collector, no app insights metric exporter
return FlowMonitor(self.logger, self.get_flow_name(), None, metrics_recorder=None)
def _get_mlflow_project_path(self, project_path: str):
# check whether it's mlflow model
mlflow_metadata_file = os.path.join(project_path, "MLmodel")
if os.path.exists(mlflow_metadata_file):
with open(mlflow_metadata_file, "r", encoding=DEFAULT_ENCODING) as fin:
mlflow_metadata = load_yaml(fin)
flow_entry = mlflow_metadata.get("flavors", {}).get("promptflow", {}).get("entry")
if mlflow_metadata:
dag_path = os.path.join(project_path, flow_entry)
return str(Path(dag_path).parent.absolute())
return project_path
def _get_common_dimensions_from_env(self):
common_dimensions_str = os.getenv("PF_SERVING_METRICS_COMMON_DIMENSIONS", None)
if common_dimensions_str:
try:
common_dimensions = json.loads(common_dimensions_str)
return common_dimensions
except Exception as ex:
self.logger.warning(f"Failed to parse common dimensions with value={common_dimensions_str}: {ex}")
return {}
def _get_default_blueprints(self, static_folder=None):
static_web_blueprint = construct_staticweb_blueprint(static_folder)
monitor_blueprint = construct_monitor_blueprint(self.get_flow_monitor())
return [static_web_blueprint, monitor_blueprint]
class DefaultAppExtension(AppExtension):
"""default app extension for local serve."""
def __init__(self, logger, **kwargs):
self.logger = logger
static_folder = kwargs.get("static_folder", None)
self.static_folder = static_folder if static_folder else DEFAULT_STATIC_PATH
logger.info(f"Static_folder: {self.static_folder}")
app_config = kwargs.get("config", None) or {}
pf_config = Configuration(overrides=app_config)
logger.info(f"Promptflow config: {pf_config}")
self.connection_provider = pf_config.get_connection_provider()
def get_flow_project_path(self) -> str:
return os.getenv("PROMPTFLOW_PROJECT_PATH", ".")
def get_flow_name(self) -> str:
project_path = self.get_flow_project_path()
return Path(project_path).stem
def get_connection_provider(self) -> str:
return self.connection_provider
def get_blueprints(self):
return self._get_default_blueprints(self.static_folder)
| promptflow/src/promptflow/promptflow/_sdk/_serving/extension/default_extension.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/extension/default_extension.py",
"repo_id": "promptflow",
"token_count": 1992
} | 13 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# this file is a middle layer between the local SDK and executor; it shares some logic with the cloud PFS.
import datetime
from pathlib import Path
from typing import Union
from promptflow._constants import FlowLanguage
from promptflow._sdk._constants import ContextAttributeKey, FlowRunProperties
from promptflow._sdk._utils import parse_variant
from promptflow._sdk.entities._flow import ProtectedFlow
from promptflow._sdk.entities._run import Run
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
from promptflow._utils.context_utils import _change_working_dir
from promptflow.batch import BatchEngine
from promptflow.contracts.run_info import Status
from promptflow.contracts.run_mode import RunMode
from promptflow.exceptions import UserErrorException, ValidationException
from ..._utils.logger_utils import LoggerFactory
from .._configuration import Configuration
from .._load_functions import load_flow
from ..entities._eager_flow import EagerFlow
from .utils import SubmitterHelper, variant_overwrite_context
logger = LoggerFactory.get_logger(name=__name__)
class RunSubmitter:
"""Submit run to executor."""
def __init__(self, client):
self._client = client
self.run_operations = self._client.runs
def submit(self, run: Run, stream=False, **kwargs):
self._run_bulk(run=run, stream=stream, **kwargs)
return self.run_operations.get(name=run.name)
def _run_bulk(self, run: Run, stream=False, **kwargs):
attributes = kwargs.get("attributes", {})
# validate & resolve variant
if run.variant:
tuning_node, variant = parse_variant(run.variant)
else:
tuning_node, variant = None, None
if run.run is not None:
# Set for flow test against run and no experiment scenario
if ContextAttributeKey.REFERENCED_BATCH_RUN_ID not in attributes:
attributes[ContextAttributeKey.REFERENCED_BATCH_RUN_ID] = run.run
if isinstance(run.run, str):
run.run = self.run_operations.get(name=run.run)
elif not isinstance(run.run, Run):
error = TypeError(f"Referenced run must be a Run instance, got {type(run.run)}")
raise UserErrorException(message=str(error), error=error)
else:
# get the run again to make sure its status is latest
run.run = self.run_operations.get(name=run.run.name)
if run.run.status != Status.Completed.value:
error = ValueError(f"Referenced run {run.run.name} is not completed, got status {run.run.status}")
raise UserErrorException(message=str(error), error=error)
run.run.outputs = self.run_operations._get_outputs(run.run)
# Start trace
if Configuration(overrides=self._client._config).is_internal_features_enabled():
from promptflow._trace._start_trace import start_trace
logger.debug("Starting trace for flow run...")
start_trace(session=kwargs.get("session", None), attributes=attributes)
self._validate_inputs(run=run)
local_storage = LocalStorageOperations(run, stream=stream, run_mode=RunMode.Batch)
with local_storage.logger:
flow_obj = load_flow(source=run.flow)
with variant_overwrite_context(flow_obj, tuning_node, variant, connections=run.connections) as flow:
self._submit_bulk_run(flow=flow, run=run, local_storage=local_storage)
@classmethod
def _validate_inputs(cls, run: Run):
if not run.run and not run.data:
error = ValidationException("Either run or data must be specified for flow run.")
raise UserErrorException(message=str(error), error=error)
def _submit_bulk_run(
self, flow: Union[ProtectedFlow, EagerFlow], run: Run, local_storage: LocalStorageOperations
) -> dict:
logger.info(f"Submitting run {run.name}, log path: {local_storage.logger.file_path}")
run_id = run.name
if flow.language == FlowLanguage.CSharp:
# TODO: consider moving this to Operations
from promptflow.batch import CSharpExecutorProxy
CSharpExecutorProxy.generate_metadata(flow_file=Path(flow.path), assembly_folder=Path(flow.code))
# TODO: shall we resolve connections here?
connections = []
else:
with _change_working_dir(flow.code):
connections = SubmitterHelper.resolve_connections(flow=flow)
column_mapping = run.column_mapping
# resolve environment variables
run.environment_variables = SubmitterHelper.load_and_resolve_environment_variables(
flow=flow, environment_variable_overrides=run.environment_variables
)
SubmitterHelper.init_env(environment_variables=run.environment_variables)
# prepare data
input_dirs = self._resolve_input_dirs(run)
self._validate_column_mapping(column_mapping)
batch_result = None
status = Status.Failed.value
exception = None
# create run to db when fully prepared to run in executor, otherwise won't create it
run._dump() # pylint: disable=protected-access
try:
batch_engine = BatchEngine(
flow.path,
flow.code,
connections=connections,
entry=flow.entry if isinstance(flow, EagerFlow) else None,
storage=local_storage,
log_path=local_storage.logger.file_path,
)
batch_result = batch_engine.run(
input_dirs=input_dirs,
inputs_mapping=column_mapping,
output_dir=local_storage.outputs_folder,
run_id=run_id,
)
error_logs = []
if batch_result.failed_lines > 0:
# Log a warning message when there are failed line runs in the bulk run.
error_logs.append(
f"{batch_result.failed_lines} out of {batch_result.total_lines} runs failed in batch run."
)
if batch_result.error_summary.aggr_error_dict:
# log warning message when there are failed aggregation nodes in bulk run.
aggregation_nodes = list(batch_result.error_summary.aggr_error_dict.keys())
error_logs.append(f"aggregation nodes {aggregation_nodes} failed in batch run.")
# update error log
if error_logs and run.properties.get(FlowRunProperties.OUTPUT_PATH, None):
error_logs.append(
f" Please check out {run.properties[FlowRunProperties.OUTPUT_PATH]} for more details."
)
if error_logs:
logger.warning("\n".join(error_logs))
# The bulk run is completed if the batch_engine.run successfully completed.
status = Status.Completed.value
except Exception as e:
# when run failed in executor, store the exception in result and dump to file
logger.warning(f"Run {run.name} failed when executing in executor with exception {e}.")
exception = e
# for user errors, swallow the stack trace and return the failed run since the user doesn't need the stack trace
if not isinstance(e, UserErrorException):
# for other errors, raise it to user to help debug root cause.
raise e
# won't raise the exception since it's already included in run object.
finally:
# persist snapshot and result
# snapshot: flow directory
local_storage.dump_snapshot(flow)
# persist inputs, outputs and metrics
local_storage.persist_result(batch_result)
# exceptions
local_storage.dump_exception(exception=exception, batch_result=batch_result)
# system metrics: token related
system_metrics = batch_result.system_metrics.to_dict() if batch_result else {}
self.run_operations.update(
name=run.name,
status=status,
end_time=datetime.datetime.now(),
system_metrics=system_metrics,
)
def _resolve_input_dirs(self, run: Run):
result = {"data": run.data if run.data else None}
if run.run is not None:
result.update(
{
"run.outputs": self.run_operations._get_outputs_path(run.run),
# to align with cloud behavior, run.inputs should refer to original data
"run.inputs": self.run_operations._get_data_path(run.run),
}
)
return {k: str(Path(v).resolve()) for k, v in result.items() if v is not None}
@classmethod
def _validate_column_mapping(cls, column_mapping: dict):
if not column_mapping:
return
if not isinstance(column_mapping, dict):
raise ValidationException(f"Column mapping must be a dict, got {type(column_mapping)}.")
all_static = True
for v in column_mapping.values():
if isinstance(v, str) and v.startswith("$"):
all_static = False
break
if all_static:
raise ValidationException(
"Column mapping must contain at least one mapping binding, "
f"current column mapping contains all static values: {column_mapping}"
)
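# Example of a column mapping that passes the validation above (illustrative):
#   {"url": "${data.url}", "groundtruth": "${run.outputs.answer}"}
# At least one value must be a "${...}" binding; an all-static mapping is rejected.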
| promptflow/src/promptflow/promptflow/_sdk/_submitter/run_submitter.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_submitter/run_submitter.py",
"repo_id": "promptflow",
"token_count": 4087
} | 14 |
#!/bin/bash
echo "$(date -uIns) - promptflow-serve/finish $@"
# stop all gunicorn processes
echo "$(date -uIns) - Stopping all Gunicorn processes"
pkill gunicorn
while pgrep gunicorn >/dev/null; do
echo "$(date -uIns) - Gunicorn process is still running, waiting for 1s"
sleep 1
done
echo "$(date -uIns) - Stopped all Gunicorn processes" | promptflow/src/promptflow/promptflow/_sdk/data/docker/runit/promptflow-serve/finish.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/docker/runit/promptflow-serve/finish.jinja2",
"repo_id": "promptflow",
"token_count": 123
} | 15 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Run Details</title>
</head>
<body>
<div id="root"></div>
<script>
window.bulk_test_details_data = {{ data }}
</script>
<script src="{{ js_path }}"></script>
</body>
</html>
| promptflow/src/promptflow/promptflow/_sdk/data/visualize.j2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/visualize.j2",
"repo_id": "promptflow",
"token_count": 154
} | 16 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from functools import lru_cache
from os import PathLike
from pathlib import Path
from typing import Dict
from promptflow._sdk._constants import NODES
from promptflow._sdk._utils import parse_variant
from promptflow._sdk.entities import FlowContext
from promptflow._sdk.entities._flow import Flow
from promptflow._utils.flow_utils import load_flow_dag
from promptflow.contracts.flow import Node
from promptflow.exceptions import UserErrorException
# Resolve flow context to invoker
# Resolve flow according to flow context
# Resolve connection, variant, overwrite, store in-memory
# create invoker based on resolved flow
# cache invoker if flow context not changed (define hash function for flow context).
class FlowContextResolver:
"""Flow context resolver."""
def __init__(self, flow_path: PathLike):
from promptflow import PFClient
self.flow_path, self.flow_dag = load_flow_dag(flow_path=Path(flow_path))
self.working_dir = Path(self.flow_path).parent.resolve()
self.node_name_2_node: Dict[str, Node] = {node["name"]: node for node in self.flow_dag[NODES]}
self.client = PFClient()
@classmethod
@lru_cache
def resolve(cls, flow: Flow) -> "FlowInvoker":
"""Resolve flow to flow invoker."""
resolver = cls(flow_path=flow.path)
resolver._resolve(flow_context=flow.context)
return resolver._create_invoker(flow_context=flow.context)
def _resolve(self, flow_context: FlowContext):
"""Resolve flow context."""
# TODO(2813319): support node overrides
# TODO: define priority of the contexts
flow_context._resolve_connections()
self._resolve_variant(flow_context=flow_context)._resolve_connections(
flow_context=flow_context,
)._resolve_overrides(flow_context=flow_context)
def _resolve_variant(self, flow_context: FlowContext) -> "FlowContextResolver":
"""Resolve variant of the flow and store in-memory."""
# TODO: put all variant string parsing here
from promptflow._sdk._submitter import overwrite_variant
if not flow_context.variant:
tuning_node, variant = None, None
else:
tuning_node, variant = parse_variant(flow_context.variant)
overwrite_variant(
flow_dag=self.flow_dag,
tuning_node=tuning_node,
variant=variant,
)
return self
def _resolve_connections(self, flow_context: FlowContext) -> "FlowContextResolver":
"""Resolve connections of the flow and store in-memory."""
from promptflow._sdk._submitter import overwrite_connections
overwrite_connections(
flow_dag=self.flow_dag,
connections=flow_context.connections,
working_dir=self.working_dir,
)
return self
def _resolve_overrides(self, flow_context: FlowContext) -> "FlowContextResolver":
"""Resolve overrides of the flow and store in-memory."""
from promptflow._sdk._submitter import overwrite_flow
overwrite_flow(
flow_dag=self.flow_dag,
params_overrides=flow_context.overrides,
)
return self
def _resolve_connection_objs(self, flow_context: FlowContext):
# validate connection objs
connections = {}
for key, connection_obj in flow_context._connection_objs.items():
scrubbed_secrets = connection_obj._get_scrubbed_secrets()
if scrubbed_secrets:
raise UserErrorException(
f"Connection {connection_obj} contains scrubbed secrets with key {scrubbed_secrets.keys()}, "
"please make sure connection has decrypted secrets to use in flow execution. "
)
connections[key] = connection_obj._to_execution_connection_dict()
return connections
def _create_invoker(self, flow_context: FlowContext) -> "FlowInvoker":
from promptflow._sdk._serving.flow_invoker import FlowInvoker
connections = self._resolve_connection_objs(flow_context=flow_context)
# use updated flow dag to create new flow object for invoker
resolved_flow = Flow(code=self.working_dir, path=self.flow_path, dag=self.flow_dag)
invoker = FlowInvoker(
flow=resolved_flow,
connections=connections,
streaming=flow_context.streaming,
)
return invoker
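# Example usage (an illustrative sketch; assumes a local flow directory):
#
#   flow = load_flow("path/to/flow")  # promptflow.load_flow
#   flow.context = FlowContext(variant="${node_name.variant_0}")
#   invoker = FlowContextResolver.resolve(flow=flow)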
| promptflow/src/promptflow/promptflow/_sdk/operations/_flow_context_resolver.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/operations/_flow_context_resolver.py",
"repo_id": "promptflow",
"token_count": 1775
} | 17 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
| promptflow/src/promptflow/promptflow/_utils/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/__init__.py",
"repo_id": "promptflow",
"token_count": 23
} | 18 |
import tiktoken
from importlib.metadata import version
from promptflow.exceptions import UserErrorException
IS_LEGACY_OPENAI = version("openai").startswith("0.")
class OpenAIMetricsCalculator:
def __init__(self, logger=None) -> None:
self._logger = logger
def get_openai_metrics_from_api_call(self, api_call: dict):
total_metrics = {}
if self._need_collect_metrics(api_call):
try:
metrics = self._get_openai_metrics_for_signal_api(api_call)
self.merge_metrics_dict(total_metrics, metrics)
except Exception as ex:
self._log_warning(f"Failed to calculate metrics due to exception: {ex}.")
children = api_call.get("children")
if children is not None:
for child in children:
child_metrics = self.get_openai_metrics_from_api_call(child)
self.merge_metrics_dict(total_metrics, child_metrics)
api_call["system_metrics"] = total_metrics
return total_metrics
def _need_collect_metrics(self, api_call: dict):
if api_call.get("type") != "LLM":
return False
output = api_call.get("output")
if not isinstance(output, dict) and not isinstance(output, list):
return False
inputs = api_call.get("inputs")
if not isinstance(inputs, dict):
return False
return True
def _get_openai_metrics_for_signal_api(self, api_call: dict):
output = api_call.get("output")
if isinstance(output, dict):
usage = output.get("usage")
if isinstance(usage, dict):
return usage
self._log_warning(
"Cannot find openai metrics in output, " "will calculate metrics from response data directly."
)
name = api_call.get("name")
# Support both legacy api and OpenAI v1 api
# Legacy api:
# https://github.com/openai/openai-python/blob/v0.28.1/openai/api_resources/chat_completion.py
# https://github.com/openai/openai-python/blob/v0.28.1/openai/api_resources/completion.py
# OpenAI v1 api:
# https://github.com/openai/openai-python/blob/main/src/openai/resources/chat/completions.py
# https://github.com/openai/openai-python/blob/main/src/openai/resources/completions.py
if (
name == "openai.api_resources.chat_completion.ChatCompletion.create"
or name == "openai.resources.chat.completions.Completions.create" # openai v1
):
return self._get_openai_metrics_for_chat_api(api_call)
elif (
name == "openai.api_resources.completion.Completion.create"
or name == "openai.resources.completions.Completions.create" # openai v1
):
return self._get_openai_metrics_for_completion_api(api_call)
else:
raise CalculatingMetricsError(f"Calculating metrics for api {name} is not supported.")
def _try_get_model(self, inputs, output):
if IS_LEGACY_OPENAI:
api_type = inputs.get("api_type")
if not api_type:
raise CalculatingMetricsError("Cannot calculate metrics for none or empty api_type.")
if api_type == "azure":
model = inputs.get("engine")
else:
model = inputs.get("model")
else:
if isinstance(output, dict):
model = output.get("model")
else:
model = output[0].model if len(output) > 0 and hasattr(output[0], "model") else None
if not model:
model = inputs.get("model")
if not model:
raise CalculatingMetricsError(
"Cannot get a valid model to calculate metrics. "
"Please specify a engine for AzureOpenAI API or a model for OpenAI API."
)
return model
def _get_openai_metrics_for_chat_api(self, api_call):
inputs = api_call.get("inputs")
output = api_call.get("output")
metrics = {}
enc, tokens_per_message, tokens_per_name = self._get_encoding_for_chat_api(self._try_get_model(inputs, output))
metrics["prompt_tokens"] = self._get_prompt_tokens_from_messages(
inputs["messages"], enc, tokens_per_message, tokens_per_name
)
if isinstance(output, list):
if IS_LEGACY_OPENAI:
metrics["completion_tokens"] = len(output)
else:
metrics["completion_tokens"] = len(
[chunk for chunk in output if chunk.choices and chunk.choices[0].delta.content]
)
else:
metrics["completion_tokens"] = self._get_completion_tokens_for_chat_api(output, enc)
metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
return metrics
def _get_encoding_for_chat_api(self, model):
try:
enc = tiktoken.encoding_for_model(model)
except KeyError:
enc = tiktoken.get_encoding("cl100k_base")
if model == "gpt-35-turbo-0301":
tokens_per_message = 4
tokens_per_name = -1
elif "gpt-35-turbo" in model or "gpt-3.5-turbo" in model or "gpt-4" in model:
tokens_per_message = 3
tokens_per_name = 1
else:
raise CalculatingMetricsError(f"Calculating metrics for model {model} is not supported.")
return enc, tokens_per_message, tokens_per_name
def _get_prompt_tokens_from_messages(self, messages, enc, tokens_per_message, tokens_per_name):
prompt_tokens = 0
for message in messages:
prompt_tokens += tokens_per_message
for key, value in message.items():
prompt_tokens += len(enc.encode(value))
if key == "name":
prompt_tokens += tokens_per_name
prompt_tokens += 3
return prompt_tokens
def _get_completion_tokens_for_chat_api(self, output, enc):
completion_tokens = 0
choices = output.get("choices")
if isinstance(choices, list):
for ch in choices:
if isinstance(ch, dict):
message = ch.get("message")
if isinstance(message, dict):
content = message.get("content")
if isinstance(content, str):
completion_tokens += len(enc.encode(content))
return completion_tokens
def _get_openai_metrics_for_completion_api(self, api_call: dict):
metrics = {}
inputs = api_call.get("inputs")
output = api_call.get("output")
enc = self._get_encoding_for_completion_api(self._try_get_model(inputs, output))
metrics["prompt_tokens"] = 0
prompt = inputs.get("prompt")
if isinstance(prompt, str):
metrics["prompt_tokens"] = len(enc.encode(prompt))
elif isinstance(prompt, list):
for pro in prompt:
metrics["prompt_tokens"] += len(enc.encode(pro))
if isinstance(output, list):
if IS_LEGACY_OPENAI:
metrics["completion_tokens"] = len(output)
else:
metrics["completion_tokens"] = len(
[chunk for chunk in output if chunk.choices and chunk.choices[0].text]
)
else:
metrics["completion_tokens"] = self._get_completion_tokens_for_completion_api(output, enc)
metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
return metrics
def _get_encoding_for_completion_api(self, model):
try:
return tiktoken.encoding_for_model(model)
except KeyError:
return tiktoken.get_encoding("p50k_base")
def _get_completion_tokens_for_completion_api(self, output, enc):
completion_tokens = 0
choices = output.get("choices")
if isinstance(choices, list):
for ch in choices:
if isinstance(ch, dict):
text = ch.get("text")
if isinstance(text, str):
completion_tokens += len(enc.encode(text))
return completion_tokens
def merge_metrics_dict(self, metrics: dict, metrics_to_merge: dict):
for k, v in metrics_to_merge.items():
metrics[k] = metrics.get(k, 0) + v
def _log_warning(self, msg):
if self._logger:
self._logger.warning(msg)
class CalculatingMetricsError(UserErrorException):
"""The exception that is raised when calculating metrics failed."""
pass
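# Example usage (illustrative; `api_call_trace` is an assumed trace dict of an LLM call):
#
#   calculator = OpenAIMetricsCalculator()
#   metrics = calculator.get_openai_metrics_from_api_call(api_call_trace)
#   # -> e.g. {"prompt_tokens": 24, "completion_tokens": 8, "total_tokens": 32}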
| promptflow/src/promptflow/promptflow/_utils/openai_metrics_calculator.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/openai_metrics_calculator.py",
"repo_id": "promptflow",
"token_count": 4132
} | 19 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from pathlib import Path
from typing import IO, AnyStr, Optional, Union
from ._utils import is_arm_id
def load_flow(
source: Union[str, PathLike, IO[AnyStr]],
*,
relative_origin: Optional[str] = None,
**kwargs,
):
"""Construct a flow object from a yaml file.
:param source: The local yaml source of a flow. Must be either a
path to a local file, or an already-open file.
If the source is a path, it will be opened and read.
An exception is raised if the file does not exist.
If the source is an open file, the file will be read directly,
and an exception is raised if the file is not readable.
:type source: Union[PathLike, str, io.TextIOWrapper]
:param relative_origin: The origin to be used when deducing
the relative locations of files referenced in the parsed yaml.
Defaults to the inputted source's directory if it is a file or file path input.
Defaults to "./" if the source is a stream input with no name value.
:type relative_origin: str
:param params_override: Fields to overwrite on top of the yaml file.
Format is [{"field1": "value1"}, {"field2": "value2"}]
:type params_override: List[Dict]
:return: Loaded flow object.
:rtype: promptflow.azure.Flow
"""
from promptflow.azure._entities._flow import Flow
if is_arm_id(source):
return source
return Flow(path=Path(source))
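# Example usage (illustrative):
#
#   flow = load_flow("flows/web-classification/flow.dag.yaml")
#   # An ARM id string is returned unchanged by the is_arm_id short-circuit above.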
| promptflow/src/promptflow/promptflow/azure/_load_functions.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_load_functions.py",
"repo_id": "promptflow",
"token_count": 531
} | 20 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._connection_operations import build_create_connection_request, build_delete_connection_request, build_get_connection_request, build_list_connection_specs_request, build_list_connections_request, build_update_connection_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectionOperations:
"""ConnectionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
body: Optional["_models.CreateOrUpdateConnectionRequest"] = None,
**kwargs: Any
) -> "_models.ConnectionEntity":
"""create_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequest')
else:
_json = None
request = build_create_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.create_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
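# Hedged usage sketch (illustrative, not generated code): calling the async
# operation above from application code. `ops` is assumed to be the
# ConnectionOperations instance attached to an authenticated service client,
# and the CreateOrUpdateConnectionRequest fields are assumed from ~flow.models;
# client construction and credentials are out of scope here.
#
#     import asyncio
#     from ... import models as _models
#
#     async def create_example_connection(ops):
#         body = _models.CreateOrUpdateConnectionRequest()  # populate fields as needed
#         entity = await ops.create_connection(
#             subscription_id="<subscription-id>",
#             resource_group_name="<resource-group>",
#             workspace_name="<workspace>",
#             connection_name="my-connection",
#             body=body,
#         )
#         return entity  # ConnectionEntity on HTTP 200; HttpResponseError otherwise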
@distributed_trace_async
async def update_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
body: Optional["_models.CreateOrUpdateConnectionRequest"] = None,
**kwargs: Any
) -> "_models.ConnectionEntity":
"""update_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequest')
else:
_json = None
request = build_update_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.update_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace_async
async def get_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ConnectionEntity":
"""get_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.get_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace_async
async def delete_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
connection_scope: Optional[Union[str, "_models.ConnectionScope"]] = None,
**kwargs: Any
) -> "_models.ConnectionEntity":
"""delete_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param connection_scope:
:type connection_scope: str or ~flow.models.ConnectionScope
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
connection_scope=connection_scope,
template_url=self.delete_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
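# Hedged usage sketch (illustrative, not generated code): connection_scope is
# optional and accepts either a ConnectionScope enum member or its string
# value; the concrete scope value below is assumed for illustration only.
#
#     entity = await ops.delete_connection(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         connection_name="my-connection",
#         connection_scope="WorkspaceScoped",  # assumed value; see ~flow.models.ConnectionScope
#     )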
@distributed_trace_async
async def list_connections(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> List["_models.ConnectionEntity"]:
"""list_connections.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionEntity, or the result of cls(response)
:rtype: list[~flow.models.ConnectionEntity]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionEntity"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connections_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connections.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionEntity]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connections.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection'} # type: ignore
@distributed_trace_async
async def list_connection_specs(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> List["_models.ConnectionSpec"]:
"""list_connection_specs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionSpec, or the result of cls(response)
:rtype: list[~flow.models.ConnectionSpec]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionSpec"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connection_specs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connection_specs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionSpec]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connection_specs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/specs'} # type: ignore
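# Hedged usage sketch (not part of the generated client): a minimal helper
# that exercises the read-only operations above. It assumes `ops` is a
# ConnectionOperations instance attached to an authenticated service client;
# client construction and credentials are intentionally out of scope here.
async def _example_dump_connections(ops, subscription_id, resource_group_name, workspace_name):
    # Both calls raise HttpResponseError on non-200 responses.
    specs = await ops.list_connection_specs(subscription_id, resource_group_name, workspace_name)
    connections = await ops.list_connections(subscription_id, resource_group_name, workspace_name)
    # ConnectionSpec/ConnectionEntity fields depend on the service contract;
    # printing the repr is enough for a smoke test.
    for item in list(specs) + list(connections):
        print(item)
    return specs, connections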
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_connection_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_connection_operations.py",
"repo_id": "promptflow",
"token_count": 6949
} | 21 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_submit_bulk_run_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/submit')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_cancel_flow_run_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "text/plain, application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/cancel')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_flow_run_info_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_flow_child_runs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
index = kwargs.pop('index', None) # type: Optional[int]
start_index = kwargs.pop('start_index', None) # type: Optional[int]
end_index = kwargs.pop('end_index', None) # type: Optional[int]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/childRuns')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if index is not None:
query_parameters['index'] = _SERIALIZER.query("index", index, 'int')
if start_index is not None:
query_parameters['startIndex'] = _SERIALIZER.query("start_index", start_index, 'int')
if end_index is not None:
query_parameters['endIndex'] = _SERIALIZER.query("end_index", end_index, 'int')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_node_runs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
index = kwargs.pop('index', None) # type: Optional[int]
start_index = kwargs.pop('start_index', None) # type: Optional[int]
end_index = kwargs.pop('end_index', None) # type: Optional[int]
aggregation = kwargs.pop('aggregation', False) # type: Optional[bool]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
"nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if index is not None:
query_parameters['index'] = _SERIALIZER.query("index", index, 'int')
if start_index is not None:
query_parameters['startIndex'] = _SERIALIZER.query("start_index", start_index, 'int')
if end_index is not None:
query_parameters['endIndex'] = _SERIALIZER.query("end_index", end_index, 'int')
if aggregation is not None:
query_parameters['aggregation'] = _SERIALIZER.query("aggregation", aggregation, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_node_run_base_path_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}/basePath')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
"nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_flow_run_log_content_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/logContent')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
class BulkRunsOperations(object):
"""BulkRunsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def submit_bulk_run(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body=None, # type: Optional["_models.SubmitBulkRunRequest"]
**kwargs # type: Any
):
# type: (...) -> str
"""submit_bulk_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~flow.models.SubmitBulkRunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'SubmitBulkRunRequest')
else:
_json = None
request = build_submit_bulk_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.submit_bulk_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('str', pipeline_response)
if response.status_code == 204:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
submit_bulk_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/submit'} # type: ignore
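# Hedged usage sketch (illustrative, not generated code): the service may
# answer 200, 202, or 204, and in each case the deserialized string is
# returned; based on the surrounding codebase it is assumed to be the bulk
# run id. `ops` is assumed to be a BulkRunsOperations instance.
#
#     body = _models.SubmitBulkRunRequest()  # populate fields as needed
#     run_id = ops.submit_bulk_run(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         body=body,
#     )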
@distributed_trace
def cancel_flow_run(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""cancel_flow_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_flow_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.cancel_flow_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_flow_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/cancel'} # type: ignore
@distributed_trace
def get_flow_run_info(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FlowRunInfo"
"""get_flow_run_info.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunInfo, or the result of cls(response)
:rtype: ~flow.models.FlowRunInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_info_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.get_flow_run_info.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_info.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}'} # type: ignore
@distributed_trace
def get_flow_child_runs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
index=None, # type: Optional[int]
start_index=None, # type: Optional[int]
end_index=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> List[Any]
"""get_flow_child_runs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param index:
:type index: int
:param start_index:
:type start_index: int
:param end_index:
:type end_index: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of any, or the result of cls(response)
:rtype: list[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[Any]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_child_runs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
index=index,
start_index=start_index,
end_index=end_index,
template_url=self.get_flow_child_runs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[object]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_child_runs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/childRuns'} # type: ignore
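# Hedged usage sketch (illustrative, not generated code): either fetch a
# single line result by `index`, or page through results with the
# startIndex/endIndex window. The index semantics (zero-based, inclusive
# bounds) are assumed here, not confirmed by the generated code.
#
#     page = ops.get_flow_child_runs(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         flow_run_id=run_id,
#         start_index=0,
#         end_index=24,  # assumed inclusive, zero-based window
#     )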
@distributed_trace
def get_flow_node_runs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
node_name, # type: str
index=None, # type: Optional[int]
start_index=None, # type: Optional[int]
end_index=None, # type: Optional[int]
aggregation=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> List[Any]
"""get_flow_node_runs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param node_name:
:type node_name: str
:param index:
:type index: int
:param start_index:
:type start_index: int
:param end_index:
:type end_index: int
:param aggregation:
:type aggregation: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of any, or the result of cls(response)
:rtype: list[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[Any]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_node_runs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
node_name=node_name,
index=index,
start_index=start_index,
end_index=end_index,
aggregation=aggregation,
template_url=self.get_flow_node_runs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[object]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_node_runs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}'} # type: ignore
@distributed_trace
def get_flow_node_run_base_path(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FlowRunBasePath"
"""get_flow_node_run_base_path.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param node_name:
:type node_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunBasePath, or the result of cls(response)
:rtype: ~flow.models.FlowRunBasePath
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunBasePath"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_node_run_base_path_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
node_name=node_name,
template_url=self.get_flow_node_run_base_path.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunBasePath', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_node_run_base_path.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}/basePath'} # type: ignore
@distributed_trace
def get_flow_run_log_content(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""get_flow_run_log_content.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_log_content_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.get_flow_run_log_content.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_log_content.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/logContent'} # type: ignore
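# Hedged usage sketch (not part of the generated client): chain the
# operations above to fetch the info and logs of a submitted bulk run.
# `ops` is assumed to be a BulkRunsOperations instance from an
# authenticated service client.
def _example_inspect_bulk_run(ops, subscription_id, resource_group_name, workspace_name, flow_run_id):
    # Both calls raise HttpResponseError on non-200 responses.
    info = ops.get_flow_run_info(subscription_id, resource_group_name, workspace_name, flow_run_id)
    logs = ops.get_flow_run_log_content(subscription_id, resource_group_name, workspace_name, flow_run_id)
    return info, logs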
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_bulk_runs_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_bulk_runs_operations.py",
"repo_id": "promptflow",
"token_count": 13028
} | 22 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import asyncio
import json
from datetime import datetime
from json import JSONDecodeError
from pathlib import Path
from typing import Any, Mapping, Optional
import httpx
from promptflow._constants import DEFAULT_ENCODING, LINE_TIMEOUT_SEC
from promptflow._core._errors import MetaFileNotFound, MetaFileReadError, NotSupported, UnexpectedError
from promptflow._sdk._constants import FLOW_META_JSON, FLOW_TOOLS_JSON, PROMPT_FLOW_DIR_NAME
from promptflow._utils.async_utils import async_run_allowing_running_loop
from promptflow._utils.exception_utils import ErrorResponse, ExceptionPresenter
from promptflow._utils.logger_utils import bulk_logger
from promptflow._utils.utils import load_json
from promptflow.batch._errors import ExecutorServiceUnhealthy
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.storage._run_storage import AbstractRunStorage
EXECUTOR_UNHEALTHY_MESSAGE = "The executor service is currently not in a healthy state"
class AbstractExecutorProxy:
@classmethod
def get_tool_metadata(cls, flow_file: Path, working_dir: Optional[Path] = None) -> dict:
"""Generate tool metadata file for the specified flow."""
return cls._get_tool_metadata(flow_file, working_dir or flow_file.parent)
def _get_flow_meta(self) -> dict:
"""Get the flow metadata from"""
raise NotImplementedError()
def get_inputs_definition(self) -> Mapping[str, Any]:
"""Get the inputs definition of an eager flow"""
from promptflow.contracts.flow import FlowInputDefinition
flow_meta = self._get_flow_meta()
inputs = {}
for key, value in flow_meta.get("inputs", {}).items():
# TODO: update this after we determine whether to accept list here or not
_type = value.get("type")
if isinstance(_type, list):
_type = _type[0]
value["type"] = _type
inputs[key] = FlowInputDefinition.deserialize(value)
return inputs
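# Hedged sketch (illustrative): the flow metadata shape consumed above is
# assumed to look roughly like the following, where each input may declare
# its type either as a string or as a single-element list:
#
#     {"inputs": {"question": {"type": "string"}, "history": {"type": ["list"]}}}
#
# get_inputs_definition then returns a mapping of input name to
# FlowInputDefinition with the normalized scalar type.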
@classmethod
def _get_tool_metadata(cls, flow_file: Path, working_dir: Path) -> dict:
raise NotImplementedError()
@classmethod
async def create(
cls,
flow_file: Path,
working_dir: Optional[Path] = None,
*,
connections: Optional[dict] = None,
storage: Optional[AbstractRunStorage] = None,
**kwargs,
) -> "AbstractExecutorProxy":
"""Create a new executor"""
raise NotImplementedError()
async def destroy(self):
"""Destroy the executor"""
pass
async def exec_line_async(
self,
inputs: Mapping[str, Any],
index: Optional[int] = None,
run_id: Optional[str] = None,
) -> LineResult:
"""Execute a line"""
raise NotImplementedError()
async def exec_aggregation_async(
self,
batch_inputs: Mapping[str, Any],
aggregation_inputs: Mapping[str, Any],
run_id: Optional[str] = None,
) -> AggregationResult:
"""Execute aggregation nodes"""
raise NotImplementedError()
async def ensure_executor_health(self):
"""Ensure the executor service is healthy before execution"""
pass
class APIBasedExecutorProxy(AbstractExecutorProxy):
def __init__(
self,
*,
working_dir: Path = None,
enable_stream_output: bool = False,
):
"""Initialize the executor proxy with the working directory.
:param working_dir: The working directory of the executor, usually the flow directory,
where we can find metadata under .promptflow. Will use current working directory if not provided.
:type working_dir: Path
"""
self._working_dir = working_dir or Path.cwd()
self._enable_stream_output = enable_stream_output
# built-in integer is thread-safe in Python.
# ref: https://stackoverflow.com/questions/6320107/are-python-ints-thread-safe
self._active_generator_count = 0
@property
def enable_stream_output(self) -> bool:
"""Whether to enable the stream output."""
return self._enable_stream_output
@property
def working_dir(self) -> Path:
"""
The working directory of the executor, usually the flow directory,
where we can find metadata under .promptflow.
"""
return self._working_dir
# region Service Lifecycle Control when Streaming Output is Enabled
async def _activate_generator(self):
"""For streaming output, we will return a generator for the output, and the execution service
should stay alive until the generator is exhausted.
This method is used to increase the active generator count.
"""
self._active_generator_count += 1
async def _deactivate_generator(self):
"""For streaming output, we will return a generator for the output, and the execution service
should stay alive until the generator is exhausted.
This method is used to decrease the active generator count.
"""
self._active_generator_count -= 1
async def _all_generators_exhausted(self):
"""For streaming output, we will return a generator for the output, and the execution service
should stay alive until the generator is exhausted.
This method is to check if all generators are exhausted.
"""
# the count should never be negative, but still check it here for safety
return self._active_generator_count <= 0
async def destroy_if_all_generators_exhausted(self):
"""
The client.stream API in the exec_line function does not return the full response at once.
For an API-based streaming chat flow, destroying the executor proxy kills the service process
and closes the connection, which makes subsequent reads of the generator content fail.
Besides, the external caller usually waits for the destruction of the executor proxy before it continues to
iterate the generator content, so we can't keep waiting here.
On the other hand, the subprocess for the execution service is not started in detached mode;
it will exit when the parent process exits. So we simply skip the destruction here.
"""
if await self._all_generators_exhausted():
await self.destroy()
# endregion
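# Hedged sketch (illustrative): the expected lifecycle of the counter above.
# The proxy activates one generator per streamed line result and only allows
# teardown once every consumer has drained its generator:
#
#     await proxy._activate_generator()           # a streaming line result was handed out
#     ...                                         # caller iterates the generator
#     await proxy._deactivate_generator()         # generator exhausted
#     await proxy.destroy_if_all_generators_exhausted()  # now safe to tear down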
def _get_flow_meta(self) -> dict:
flow_meta_json_path = self.working_dir / PROMPT_FLOW_DIR_NAME / FLOW_META_JSON
if not flow_meta_json_path.is_file():
raise MetaFileNotFound(
message_format=(
# TODO: pf flow validate should be able to generate flow.json
"Failed to fetch meta of inputs: cannot find {file_path}, please retry."
),
file_path=flow_meta_json_path.absolute().as_posix(),
)
with open(flow_meta_json_path, mode="r", encoding=DEFAULT_ENCODING) as f:
return json.load(f)
@classmethod
def _get_tool_metadata(cls, flow_file: Path, working_dir: Path) -> dict:
flow_tools_json_path = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
if flow_tools_json_path.is_file():
with open(flow_tools_json_path, mode="r", encoding=DEFAULT_ENCODING) as f:
try:
return json.load(f)
except json.JSONDecodeError:
raise MetaFileReadError(
message_format="Failed to fetch meta of tools: {file_path} is not a valid json file.",
file_path=flow_tools_json_path.absolute().as_posix(),
)
raise MetaFileNotFound(
message_format=(
"Failed to fetch meta of tools: cannot find {file_path}, please build the flow project first."
),
file_path=flow_tools_json_path.absolute().as_posix(),
)
@property
def api_endpoint(self) -> str:
"""The basic API endpoint of the executor service.
The executor proxy calls the executor service to get the
line results and aggregation result through this endpoint.
"""
raise NotImplementedError()
@property
def chat_output_name(self) -> Optional[str]:
"""The name of the chat output in the line result. Return None if the bonded flow is not a chat flow."""
# TODO: implement this based on _get_flow_meta
return None
def exec_line(
self,
inputs: Mapping[str, Any],
index: Optional[int] = None,
run_id: Optional[str] = None,
) -> LineResult:
"""Execute a line synchronously.
For now, we add this method to support the streaming output; maybe we can remove this method after we
figure out how to support streaming output in async mode.
If enable_stream_output is False, this method will call exec_line_async to get the line result.
:param inputs: The inputs of the line.
:type inputs: Mapping[str, Any]
:param index: The index of the line to execute.
:type index: Optional[int]
:param run_id: The id of the run.
:type run_id: Optional[str]
:return: The line result.
:rtype: LineResult
"""
if not self.enable_stream_output:
return async_run_allowing_running_loop(
self.exec_line_async,
inputs=inputs,
index=index,
run_id=run_id,
)
start_time = datetime.utcnow()
# call execution api to get line results
url = self.api_endpoint + "/execution"
payload = {"run_id": run_id, "line_number": index, "inputs": inputs}
headers = {"Accept": "text/event-stream"}
def generator():
with httpx.Client() as client:
with client.stream("POST", url, json=payload, timeout=LINE_TIMEOUT_SEC, headers=headers) as response:
if response.status_code != 200:
result = self._process_http_response(response)
run_info = FlowRunInfo.create_with_error(start_time, inputs, index, run_id, result)
yield LineResult(output={}, aggregation_inputs={}, run_info=run_info, node_run_infos={})
# the error body is not a stream of line results, so stop here
return
for line in response.iter_lines():
chunk_data = json.loads(line)
# only support one chat output for now
yield LineResult.deserialize(chunk_data)
origin_generator = generator()
line_result = next(origin_generator)
async_run_allowing_running_loop(self._activate_generator)
if self.chat_output_name is not None and self.chat_output_name in line_result.output:
first_chat_output = line_result.output[self.chat_output_name]
def final_generator():
yield first_chat_output
for output in origin_generator:
yield output.output[self.chat_output_name]
async_run_allowing_running_loop(self._deactivate_generator)
# Note: the generator output should be saved in both line_result.output and line_result.run_info.output
line_result.output[self.chat_output_name] = final_generator()
line_result.run_info.output[self.chat_output_name] = final_generator()
# TODO: do we support streaming output for non-chat flow and what to return if so?
return line_result
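# Hedged usage sketch (illustrative): consuming a streaming line result from
# exec_line. The output key comes from chat_output_name, so this only applies
# to chat flows; proxy construction is out of scope here.
#
#     line_result = proxy.exec_line(inputs={"question": "hi"}, index=0, run_id="run-1")
#     chunks = line_result.output[proxy.chat_output_name]  # a generator, not a string
#     for chunk in chunks:
#         print(chunk, end="")
#     # once all generators are drained, the service may be torn down:
#     # await proxy.destroy_if_all_generators_exhausted()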
async def exec_line_async(
self,
inputs: Mapping[str, Any],
index: Optional[int] = None,
run_id: Optional[str] = None,
) -> LineResult:
if self.enable_stream_output:
# TODO: update to async; "async for" over the final_generator function yields no result in async mode for now
raise NotSupported("Stream output is not supported in async mode for now")
start_time = datetime.utcnow()
# call execution api to get line results
url = self.api_endpoint + "/execution"
payload = {"run_id": run_id, "line_number": index, "inputs": inputs}
async with httpx.AsyncClient() as client:
response = await client.post(url, json=payload, timeout=LINE_TIMEOUT_SEC)
# process the response
result = self._process_http_response(response)
if response.status_code != 200:
run_info = FlowRunInfo.create_with_error(start_time, inputs, index, run_id, result)
return LineResult(output={}, aggregation_inputs={}, run_info=run_info, node_run_infos={})
return LineResult.deserialize(result)
async def exec_aggregation_async(
self,
batch_inputs: Mapping[str, Any],
aggregation_inputs: Mapping[str, Any],
run_id: Optional[str] = None,
) -> AggregationResult:
# call aggregation api to get aggregation result
async with httpx.AsyncClient() as client:
url = self.api_endpoint + "/aggregation"
payload = {"run_id": run_id, "batch_inputs": batch_inputs, "aggregation_inputs": aggregation_inputs}
response = await client.post(url, json=payload, timeout=LINE_TIMEOUT_SEC)
result = self._process_http_response(response)
return AggregationResult.deserialize(result)
async def ensure_executor_startup(self, error_file):
"""Ensure the executor service is initialized before calling the API to get the results"""
try:
await self.ensure_executor_health()
except ExecutorServiceUnhealthy as ex:
# raise the init error if there is any
startup_ex = self._check_startup_error_from_file(error_file) or ex
bulk_logger.error(f"Failed to start up the executor due to an error: {str(startup_ex)}")
await self.destroy()
raise startup_ex
async def ensure_executor_health(self):
"""Ensure the executor service is healthy before calling the API to get the results
During testing, we observed that the executor service started quickly on Windows.
However, there is a noticeable delay in booting on Linux.
So we set a specific waiting period. If the executor service fails to return to normal
within the allocated timeout, an exception is thrown to indicate a potential problem.
"""
retry_count = 0
max_retry_count = 20
while retry_count < max_retry_count:
if not self._is_executor_active():
bulk_logger.error("The executor service is not active. Please check the logs for more details.")
break
if await self._check_health():
return
# wait for 1s to prevent calling the API too frequently
await asyncio.sleep(1)
retry_count += 1
raise ExecutorServiceUnhealthy(f"{EXECUTOR_UNHEALTHY_MESSAGE}. Please resubmit your flow and try again.")
def _is_executor_active(self):
"""The interface function to check if the executor service is active"""
return True
async def _check_health(self):
try:
health_url = self.api_endpoint + "/health"
async with httpx.AsyncClient() as client:
response = await client.get(health_url)
if response.status_code != 200:
bulk_logger.warning(f"{EXECUTOR_UNHEALTHY_MESSAGE}. Response: {response.status_code} - {response.text}")
return False
return True
except Exception as e:
bulk_logger.warning(f"{EXECUTOR_UNHEALTHY_MESSAGE}. Error: {str(e)}")
return False
def _check_startup_error_from_file(self, error_file) -> Optional[Exception]:
error_dict = load_json(error_file)
if error_dict:
error_response = ErrorResponse.from_error_dict(error_dict)
bulk_logger.error(
"Error when starting the executor service: "
f"[{error_response.innermost_error_code}] {error_response.message}"
)
return ValidationException(error_response.message, target=ErrorTarget.BATCH)
return None
def _process_http_response(self, response: httpx.Response):
if response.status_code == 200:
# if the status code is 200, the response is the json dict of a line result
return response.json()
else:
# use this instead of response.text to handle streaming response
response_text = response.read().decode(DEFAULT_ENCODING)
# if the status code is not 200, log the error
message_format = "Unexpected error when executing a line, status code: {status_code}, error: {error}"
bulk_logger.error(message_format.format(status_code=response.status_code, error=response_text))
# if response can be parsed as json, return the error dict
# otherwise, wrap the error in an UnexpectedError and return the error dict
try:
error_dict = json.loads(response_text)
return error_dict["error"]
except (JSONDecodeError, KeyError):
unexpected_error = UnexpectedError(
message_format=message_format, status_code=response.status_code, error=response_text
)
return ExceptionPresenter.create(unexpected_error).to_dict()
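# Hedged sketch (not part of the module): the smallest concrete subclass of
# APIBasedExecutorProxy, showing the one member every API-based proxy must
# provide. The endpoint value is a made-up localhost address.
class _ExampleExecutorProxy(APIBasedExecutorProxy):
    @property
    def api_endpoint(self) -> str:
        # A real proxy would return the base URL of the execution service it
        # launched; /health, /execution and /aggregation are derived from it.
        return "http://localhost:8080"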
| promptflow/src/promptflow/promptflow/batch/_base_executor_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/batch/_base_executor_proxy.py",
"repo_id": "promptflow",
"token_count": 7144
} | 23 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import logging
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Type, TypeVar
from promptflow._constants import CONNECTION_NAME_PROPERTY
from .multimedia import Image
from .types import AssistantDefinition, FilePath, PromptTemplate, Secret
logger = logging.getLogger(__name__)
T = TypeVar("T", bound="Enum")
def _deserialize_enum(cls: Type[T], val) -> T:
if not all(isinstance(i.value, str) for i in cls):
return val
typ = next((i for i in cls if val.lower() == i.value.lower()), None)
# Keep the string value for an unknown type, as it may be resolved later once some prerequisites are imported.
# Type resolution is ensured in 'ensure_node_inputs_type' before execution.
return typ if typ else val
class ValueType(str, Enum):
"""Value types."""
INT = "int"
DOUBLE = "double"
BOOL = "bool"
STRING = "string"
SECRET = "secret"
PROMPT_TEMPLATE = "prompt_template"
LIST = "list"
OBJECT = "object"
FILE_PATH = "file_path"
IMAGE = "image"
ASSISTANT_DEFINITION = "assistant_definition"
@staticmethod
def from_value(t: Any) -> "ValueType":
"""Get :class:`~promptflow.contracts.tool.ValueType` by value.
:param t: The value needs to get its :class:`~promptflow.contracts.tool.ValueType`
:type t: Any
:return: The :class:`~promptflow.contracts.tool.ValueType` of the given value
:rtype: ~promptflow.contracts.tool.ValueType
"""
if isinstance(t, Secret):
return ValueType.SECRET
if isinstance(t, PromptTemplate):
return ValueType.PROMPT_TEMPLATE
if isinstance(t, bool):
return ValueType.BOOL
if isinstance(t, int):
return ValueType.INT
if isinstance(t, float):
return ValueType.DOUBLE
# FilePath is a subclass of str, so it must be checked before str
if isinstance(t, FilePath):
return ValueType.FILE_PATH
if isinstance(t, str):
return ValueType.STRING
if isinstance(t, list):
return ValueType.LIST
if isinstance(t, AssistantDefinition):
return ValueType.ASSISTANT_DEFINITION
return ValueType.OBJECT
@staticmethod
def from_type(t: type) -> "ValueType":
"""Get :class:`~promptflow.contracts.tool.ValueType` by type.
:param t: The type needs to get its :class:`~promptflow.contracts.tool.ValueType`
:type t: type
:return: The :class:`~promptflow.contracts.tool.ValueType` of the given type
:rtype: ~promptflow.contracts.tool.ValueType
"""
if t == int:
return ValueType.INT
if t == float:
return ValueType.DOUBLE
if t == bool:
return ValueType.BOOL
if t == str:
return ValueType.STRING
if t == list:
return ValueType.LIST
if t == Secret:
return ValueType.SECRET
if t == PromptTemplate:
return ValueType.PROMPT_TEMPLATE
if t == FilePath:
return ValueType.FILE_PATH
if t == Image:
return ValueType.IMAGE
if t == AssistantDefinition:
return ValueType.ASSISTANT_DEFINITION
return ValueType.OBJECT
def parse(self, v: Any) -> Any: # noqa: C901
"""Parse value to the given :class:`~promptflow.contracts.tool.ValueType`.
:param v: The value needs to be parsed to the given :class:`~promptflow.contracts.tool.ValueType`
:type v: Any
:return: The parsed value
:rtype: Any
"""
if self == ValueType.INT:
return int(v)
if self == ValueType.DOUBLE:
return float(v)
if self == ValueType.BOOL:
if isinstance(v, bool):
return v
if isinstance(v, str) and v.lower() in {"true", "false"}:
return v.lower() == "true"
raise ValueError(f"Invalid boolean value {v!r}")
if self == ValueType.STRING:
return str(v)
if self == ValueType.LIST:
if isinstance(v, str):
v = json.loads(v)
if not isinstance(v, list):
raise ValueError(f"Invalid list value {v!r}")
return v
if self == ValueType.OBJECT:
if isinstance(v, str):
try:
return json.loads(v)
except Exception:
# Ignore the exception since it might really be a string
pass
# TODO: parse other types
return v
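# Illustrative parse examples (for documentation only):
#   ValueType.BOOL.parse("TRUE") -> True
#   ValueType.LIST.parse('["a", "b"]') -> ["a", "b"]
#   ValueType.INT.parse("42") -> 42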
class ConnectionType:
"""This class provides methods to interact with connection types."""
@staticmethod
def get_connection_class(type_name: str) -> Optional[type]:
"""Get connection type by type name.
:param type_name: The type name of the connection
:type type_name: str
:return: The connection type
:rtype: type
"""
# Note: This function must be called after ensure_flow_valid, as required modules may not be imported yet,
# and connections may not be registered yet.
from promptflow._core.tools_manager import connections
if not isinstance(type_name, str):
return None
return connections.get(type_name)
@staticmethod
def is_connection_class_name(type_name: str) -> bool:
"""Check if the given type name is a connection type.
:param type_name: The type name of the connection
:type type_name: str
:return: Whether the given type name is a connection type
:rtype: bool
"""
return ConnectionType.get_connection_class(type_name) is not None
@staticmethod
def is_connection_value(val: Any) -> bool:
"""Check if the given value is a connection.
:param val: The value to check
:type val: Any
:return: Whether the given value is a connection
:rtype: bool
"""
# Note: This function must be called after ensure_flow_valid, as required modules may not be imported yet,
# and connections may not be registered yet.
from promptflow._core.tools_manager import connections
val = type(val) if not isinstance(val, type) else val
return val in connections.values() or ConnectionType.is_custom_strong_type(val)
@staticmethod
def is_custom_strong_type(val: Any) -> bool:
"""Check if the given value is a custom strong type connection.
:param val: The value to check
:type val: Any
:return: Whether the given value is a custom strong type
:rtype: bool
"""
from promptflow.connections import CustomStrongTypeConnection
val = type(val) if not isinstance(val, type) else val
try:
return issubclass(val, CustomStrongTypeConnection)
except TypeError as e:
# TypeError is not expected to happen, but if it does, we will log it for debugging and return False.
# The try-except block cannot be confidently removed due to the uncertainty of TypeError that may occur.
logger.warning(f"Failed to check if {val} is a custom strong type: {e}")
return False
@staticmethod
def serialize_conn(connection: Any) -> dict:
"""Serialize the given connection.
:param connection: The connection to serialize
:type connection: Any
:return: A dictionary representation of the connection.
:rtype: dict
"""
if not ConnectionType.is_connection_value(connection):
raise ValueError(f"Invalid connection value {connection!r}")
return getattr(connection, CONNECTION_NAME_PROPERTY, type(connection).__name__)
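# Illustrative: ConnectionType.serialize_conn(conn) returns the connection's registered
# name when available (e.g. "open_ai_connection"); otherwise the connection class name.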
class ToolType(str, Enum):
"""Tool types."""
LLM = "llm"
PYTHON = "python"
CSHARP = "csharp"
PROMPT = "prompt"
_ACTION = "action"
CUSTOM_LLM = "custom_llm"
@dataclass
class InputDefinition:
"""Input definition."""
type: List[ValueType]
default: str = None
description: str = None
enum: List[str] = None
# Param 'custom_type' is currently used for inputs of custom strong type connection.
# For a custom strong type connection input, the type should be 'CustomConnection',
# while the custom_type should be the custom strong type connection class name.
custom_type: List[str] = None
def serialize(self) -> dict:
"""Serialize input definition to dict.
:return: The serialized input definition
:rtype: dict
"""
data = {}
data["type"] = [t.value for t in self.type]
if len(self.type) == 1:
data["type"] = self.type[0].value
if self.default:
data["default"] = str(self.default)
if self.description:
data["description"] = self.description
if self.enum:
data["enum"] = self.enum
if self.custom_type:
data["custom_type"] = self.custom_type
return data
@staticmethod
def deserialize(data: dict) -> "InputDefinition":
"""Deserialize dict to input definition.
:param data: The dict needs to be deserialized
:type data: dict
:return: The deserialized input definition
:rtype: ~promptflow.contracts.tool.InputDefinition
"""
def _deserialize_type(v):
v = [v] if not isinstance(v, list) else v
            # Note: Connection types are kept as string values,
            # as they may be resolved later once prerequisites are imported.
return [_deserialize_enum(ValueType, item) for item in v]
return InputDefinition(
_deserialize_type(data["type"]),
data.get("default", ""),
data.get("description", ""),
data.get("enum", []),
data.get("custom_type", []),
)
def to_flow_input_definition(self):
"""Used for eager flow to convert input definition to flow input definition."""
from .flow import FlowInputDefinition
# TODO: To align with tool resolver we respect the first type if multiple types are provided,
# still need more discussion on this. Should we raise error if multiple types are provided?
return FlowInputDefinition(
type=self.type[0], default=self.default, description=self.description, enum=self.enum
)
@dataclass
class OutputDefinition:
"""Output definition."""
type: List["ValueType"]
description: str = ""
is_property: bool = False
def serialize(self) -> dict:
"""Serialize output definition to dict.
:return: The serialized output definition
:rtype: dict
"""
data = {"type": [t.value for t in self.type], "is_property": self.is_property}
if len(data["type"]) == 1:
data["type"] = data["type"][0]
if self.description:
data["description"] = self.description
return data
@staticmethod
def deserialize(data: dict) -> "OutputDefinition":
"""Deserialize dict to output definition.
:param data: The dict needs to be deserialized
:type data: dict
:return: The deserialized output definition
:rtype: ~promptflow.contracts.tool.OutputDefinition
"""
return OutputDefinition(
[ValueType(t) for t in data["type"]] if isinstance(data["type"], list) else [ValueType(data["type"])],
data.get("description", ""),
data.get("is_property", False),
)
@dataclass
class Tool:
"""Tool definition.
:param name: The name of the tool
:type name: str
:param type: The type of the tool
:type type: ~promptflow.contracts.tool.ToolType
:param inputs: The inputs of the tool
:type inputs: Dict[str, ~promptflow.contracts.tool.InputDefinition]
:param outputs: The outputs of the tool
:type outputs: Optional[Dict[str, ~promptflow.contracts.tool.OutputDefinition]]
:param description: The description of the tool
:type description: Optional[str]
:param module: The module of the tool
:type module: Optional[str]
:param class_name: The class name of the tool
:type class_name: Optional[str]
:param source: The source of the tool
:type source: Optional[str]
:param code: The code of the tool
:type code: Optional[str]
:param function: The function of the tool
:type function: Optional[str]
:param connection_type: The connection type of the tool
:type connection_type: Optional[List[str]]
:param is_builtin: Whether the tool is a built-in tool
:type is_builtin: Optional[bool]
:param stage: The stage of the tool
:type stage: Optional[str]
:param enable_kwargs: Whether to enable kwargs, only available for customer python tool
:type enable_kwargs: Optional[bool]
:param deprecated_tools: A list of old tool IDs that are mapped to the current tool ID.
:type deprecated_tools: Optional[List[str]]
"""
name: str
type: ToolType
inputs: Dict[str, InputDefinition]
outputs: Optional[Dict[str, OutputDefinition]] = None
description: Optional[str] = None
module: Optional[str] = None
class_name: Optional[str] = None
source: Optional[str] = None
code: Optional[str] = None
function: Optional[str] = None
connection_type: Optional[List[str]] = None
is_builtin: Optional[bool] = None
stage: Optional[str] = None
enable_kwargs: Optional[bool] = False
deprecated_tools: Optional[List[str]] = None
def serialize(self) -> dict:
"""Serialize tool to dict and skip None fields.
:return: The serialized tool
:rtype: dict
"""
data = asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if v is not None and k != "outputs"})
if not self.type == ToolType._ACTION:
return data
# Pop unused field for action
skipped_fields = ["type", "inputs", "outputs"]
return {k: v for k, v in data.items() if k not in skipped_fields}
@staticmethod
def deserialize(data: dict) -> "Tool":
"""Deserialize dict to tool.
:param data: The dict needs to be deserialized
:type data: dict
:return: The deserialized tool
:rtype: ~promptflow.contracts.tool.Tool
"""
return Tool(
name=data["name"],
description=data.get("description", ""),
type=_deserialize_enum(ToolType, data["type"]),
inputs={k: InputDefinition.deserialize(i) for k, i in data.get("inputs", {}).items()},
outputs={k: OutputDefinition.deserialize(o) for k, o in data.get("outputs", {}).items()},
module=data.get("module"),
class_name=data.get("class_name"),
source=data.get("source"),
code=data.get("code"),
function=data.get("function"),
connection_type=data.get("connection_type"),
is_builtin=data.get("is_builtin"),
stage=data.get("stage"),
enable_kwargs=data.get("enable_kwargs", False),
deprecated_tools=data.get("deprecated_tools"),
)
def _require_connection(self) -> bool:
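        # A tool requires a connection if it is an LLM tool, or if it declares at least one connection type.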
        return self.type is ToolType.LLM or (isinstance(self.connection_type, list) and len(self.connection_type) > 0)
class ToolFuncCallScenario(str, Enum):
GENERATED_BY = "generated_by"
REVERSE_GENERATED_BY = "reverse_generated_by"
DYNAMIC_LIST = "dynamic_list"
| promptflow/src/promptflow/promptflow/contracts/tool.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/contracts/tool.py",
"repo_id": "promptflow",
"token_count": 6397
} | 24 |
import asyncio
import importlib
import inspect
import uuid
from pathlib import Path
from typing import Any, Callable, Mapping, Optional
from promptflow._constants import LINE_NUMBER_KEY
from promptflow._core.run_tracker import RunTracker
from promptflow._core.tool_meta_generator import PythonLoadError
from promptflow._core.tracer import Tracer, _traced
from promptflow._utils.dataclass_serializer import convert_eager_flow_output_to_dict
from promptflow._utils.logger_utils import logger
from promptflow._utils.tool_utils import function_to_interface
from promptflow._utils.yaml_utils import load_yaml
from promptflow.contracts.flow import Flow
from promptflow.executor._result import LineResult
from promptflow.storage import AbstractRunStorage
from promptflow.storage._run_storage import DefaultRunStorage
from .flow_executor import FlowExecutor
class ScriptExecutor(FlowExecutor):
def __init__(
self,
flow_file: Path,
connections: Optional[dict] = None,
working_dir: Optional[Path] = None,
*,
storage: Optional[AbstractRunStorage] = None,
):
logger.debug(f"Start initializing the executor with {flow_file}.")
self._flow_file = flow_file
self._working_dir = Flow._resolve_working_dir(flow_file, working_dir)
self._initialize_function()
self._connections = connections
self._storage = storage or DefaultRunStorage()
self._flow_id = "default_flow_id"
self._log_interval = 60
self._line_timeout_sec = 600
def exec_line(
self,
inputs: Mapping[str, Any],
index: Optional[int] = None,
run_id: Optional[str] = None,
**kwargs,
) -> LineResult:
run_id = run_id or str(uuid.uuid4())
with self._update_operation_context(run_id, index):
return self._exec_line(inputs, index, run_id)
def _exec_line(
self, inputs: Mapping[str, Any], index: Optional[int] = None, run_id: Optional[str] = None
) -> LineResult:
line_run_id = run_id if index is None else f"{run_id}_{index}"
run_tracker = RunTracker(self._storage)
run_info = run_tracker.start_flow_run(
flow_id=self._flow_id,
root_run_id=run_id,
run_id=line_run_id,
parent_run_id=run_id,
inputs=inputs,
index=index,
)
        # The executor adds line_number to batch inputs when it is absent from the original inputs.
        # It should be removed here, so we only preserve the inputs that are contained in self._inputs.
inputs = {k: inputs[k] for k in self._inputs if k in inputs}
output = None
traces = []
try:
Tracer.start_tracing(line_run_id)
if self._is_async:
output = asyncio.run(self._func(**inputs))
else:
output = self._func(**inputs)
traces = Tracer.end_tracing(line_run_id)
# Should convert output to dict before storing it to run info, since we will add key 'line_number' to it,
# so it must be a dict.
output_dict = convert_eager_flow_output_to_dict(output)
run_tracker.end_run(line_run_id, result=output_dict, traces=traces)
except Exception as e:
if not traces:
traces = Tracer.end_tracing(line_run_id)
run_tracker.end_run(line_run_id, ex=e, traces=traces)
finally:
run_tracker.persist_flow_run(run_info)
line_result = LineResult(output, {}, run_info, {})
# Return line result with index
if index is not None and isinstance(line_result.output, dict):
line_result.output[LINE_NUMBER_KEY] = index
return line_result
def enable_streaming_for_llm_flow(self, stream_required: Callable[[], bool]):
# TODO(2901157): check if eager mode should have streaming
return
def get_inputs_definition(self):
return self._inputs
def _initialize_function(self):
module_name, func_name = self._parse_flow_file()
module = importlib.import_module(module_name)
func = getattr(module, func_name, None)
if func is None or not inspect.isfunction(func):
raise PythonLoadError(
message_format="Failed to load python function '{func_name}' from file '{module_name}'.",
func_name=func_name,
module_name=module_name,
)
# If the function is not decorated with trace, add trace for it.
if not hasattr(func, "__original_function"):
func = _traced(func)
self._func = func
inputs, _, _, _ = function_to_interface(self._func)
self._inputs = {k: v.to_flow_input_definition() for k, v in inputs.items()}
self._is_async = inspect.iscoroutinefunction(self._func)
return func
def _parse_flow_file(self):
with open(self._working_dir / self._flow_file, "r", encoding="utf-8") as fin:
flow_dag = load_yaml(fin)
entry = flow_dag.get("entry", "")
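        # The entry is expected in "<module>:<function>" form, e.g. "my_flow:entry_func" (illustrative).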
module_name, func_name = entry.split(":")
return module_name, func_name
| promptflow/src/promptflow/promptflow/executor/_script_executor.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_script_executor.py",
"repo_id": "promptflow",
"token_count": 2198
} | 25 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
from promptflow._core.tracer import Trace, Tracer, TraceType
class LangChainEventType(Enum):
LLM = "LLM", 0
CHAIN = "CHAIN", 1
TOOL = "TOOL", 2
AGENT = "AGENT", 3
def __init__(self, _: str, level: int):
self._level = level
def __lt__(self, other: "LangChainEventType"):
return self._level < other._level
class PromptFlowCallbackHandler(BaseCallbackHandler):
""":class:`~promptflow.integrations.langchain.PromptFlowCallbackHandler` implements the
`langchain.callbacks.base.BaseCallbackHandler` interface, which has a method for each event that
can be subscribed to. The appropriate method will be called on the handler when the event is triggered.
"""
def __init__(self):
super().__init__()
self._tracer = Tracer.active_instance()
self._events_stack = [] # Use this to track the current event type to avoid popping the wrong event
@property
def always_verbose(self) -> bool:
"""Whether to always be verbose."""
return True
def _push(self, trace: Trace):
if not self._tracer:
return
self._tracer._push(trace)
def _pop(self, output=None, error: Optional[Exception] = None, event_type: Optional[LangChainEventType] = None):
"""Pop the trace from the trace stack.
        PromptFlowCallbackHandler assumes that the langchain events are emitted in pairs, with a corresponding
start and end event. However, this is not always true. Therefore, this function uses the event stack to
keep track of the current event type, in order to avoid popping the wrong event.
The function performs the following steps:
1. If the trace stack is empty, it simply returns without popping anything.
2. If the event type is None, it pops the top of the trace stack.
3. If the top of the event stack is equal to the given event type, it pops the top of the event stack
and trace stack.
4. If the top of the event stack is less than the given event type, indicating the previous event
without a corresponding end, it first pops the top of the event stack and then recursively calls the
_pop function to continue popping until the correct event type is found.
5. If the top of the event stack is greater than the given event type, indicating the current event
without a corresponding start, it simply returns without popping anything.
By following this approach, the function ensures that only the correct events are popped from the stacks.
"""
if not self._tracer:
return
if not event_type:
self._tracer._pop(output, error)
else:
if not self._events_stack:
return
if self._events_stack[-1] == event_type:
self._events_stack.pop()
self._tracer._pop(output, error)
elif self._events_stack[-1] < event_type:
self._events_stack.pop()
self._tracer._pop()
self._pop(output, error, event_type)
else:
return
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
"""Run when LLM starts running.
:param serialized: The serialized LLM object.
:type serialized: Dict[str, Any]
:param prompts: The prompts used to run LLM.
:type prompts: List[str]
"""
name = self._get_name(serialized) or "LLM"
trace = Trace(name, TraceType.LANGCHAIN, {"prompts": prompts})
self._events_stack.append(LangChainEventType.LLM)
self._push(trace)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled.
:param token: The new token.
:type token: str
"""
        pass  # We do not handle this event
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
:param response: The response from LLM.
:type response: LLMResult
"""
output = response
self._pop(output, event_type=LangChainEventType.LLM)
def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
"""Run when LLM errors.
:param error: The error from LLM.
:type error: Union[Exception, KeyboardInterrupt]
"""
self._pop(error=error, event_type=LangChainEventType.LLM)
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain starts running.
:param serialized: The serialized chain object.
:type serialized: Dict[str, Any]
:param inputs: The inputs used to run chain.
:type inputs: Dict[str, Any]
"""
name = self._get_name(serialized) or "Chain"
trace = Trace(name, TraceType.LANGCHAIN, inputs)
self._events_stack.append(LangChainEventType.CHAIN)
self._push(trace)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running.
:param outputs: The outputs from chain.
:type outputs: Dict[str, Any]
"""
self._pop(outputs, event_type=LangChainEventType.CHAIN)
def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
"""Run when chain errors.
:param error: The error from chain.
:type error: Union[Exception, KeyboardInterrupt]
"""
self._pop(error=error, event_type=LangChainEventType.CHAIN)
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> None:
"""Run when tool starts running.
:param serialized: The serialized tool object.
:type serialized: Dict[str, Any]
:param input_str: The input string used to run tool.
:type input_str: str
"""
name = self._get_name(serialized) or "Tool"
trace = Trace(name, TraceType.LANGCHAIN, {"input_str": input_str})
self._events_stack.append(LangChainEventType.TOOL)
self._push(trace)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running.
:param output: The output from tool.
:type output: str
"""
self._pop(output, event_type=LangChainEventType.TOOL)
def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
"""Run when tool errors.
:param error: The error from tool.
:type error: Union[Exception, KeyboardInterrupt]
"""
self._pop(error=error, event_type=LangChainEventType.TOOL)
def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on arbitrary text.
:param text: The text.
:type text: str
"""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
"""Run on agent action.
:param action: The action from agent.
:type action: AgentAction
"""
name = action.tool
trace = Trace(name, TraceType.LANGCHAIN, {"tool_input": action.tool_input})
self._events_stack.append(LangChainEventType.AGENT)
self._push(trace)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end.
:param finish: The finish from agent.
:type finish: AgentFinish
"""
output = finish.return_values
self._pop(output, event_type=LangChainEventType.AGENT)
def _get_name(self, serialized: Dict[str, Any]):
# For version 0.0.197 and earlier, the name is stored in the "name" field,
# and for later versions, the name is stored in the "id" field.
# If none exists, return None and use a default name.
if "name" in serialized.keys():
return serialized["name"]
elif "id" in serialized.keys() and isinstance(serialized["id"], list):
return serialized["id"][-1]
else:
return None
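# Illustrative usage (the chain and variable names below are assumptions):
#   handler = PromptFlowCallbackHandler()
#   llm_chain.run(question, callbacks=[handler])
# Each LangChain start/end event is then recorded as a promptflow trace.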
| promptflow/src/promptflow/promptflow/integrations/langchain.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/integrations/langchain.py",
"repo_id": "promptflow",
"token_count": 3372
} | 26 |
# Change log of default runtime image
## Runtime image
In Azure Machine Learning prompt flow, runtime provides the environment to execute flows. The default runtime includes a pre-built Docker image, which contains all necessary dependent packages.
### Pull image
The image can be pulled by specifying a runtime version and executing the following command:
```
docker pull mcr.microsoft.com/azureml/promptflow/promptflow-runtime-stable:<runtime_version>
```
### Check image version
You can check the runtime image version from the flow execution log:
![img](../../media/cloud/runtime-change-log/runtime-version.png)
## Change log
The default runtime image is continuously updated; here we record the new features and bug fixes of each image version.
### 20240124.v3
#### New features
- Support downloading data from Azure Machine Learning registry for batch run.
- Show node status when one line of a batch run times out.
#### Bugs fixed
- Fix the bug that an exception raised during data preparation is not recorded in run history.
- Fix the bug that an unexpected exception is raised when the executor process crashes.
### 20240116.v1
#### New features
NA
#### Bugs fixed
- Add validation for wrong connection type for LLM tool.
### 20240111.v2
#### New features
- Support error log scrubbing for heron jobs.
#### Bugs fixed
- Fixed the compatibility issue between runtime and promptflow package < 1.3.0
| promptflow/docs/cloud/azureai/runtime-change-log.md/0 | {
"file_path": "promptflow/docs/cloud/azureai/runtime-change-log.md",
"repo_id": "promptflow",
"token_count": 342
} | 0 |
# Distribute flow as executable app
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental).
:::
We are going to use the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/) as
an example to show how to distribute a flow as an executable app with [PyInstaller](https://pyinstaller.org/en/stable/requirements.html#).
Please ensure that you have installed all the required dependencies. You can refer to the "Prerequisites" section in the README of the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/) for a comprehensive list of prerequisites and installation instructions. We also recommend adding a `requirements.txt` that lists all the required dependencies for each flow.
[Pyinstaller](https://pyinstaller.org/en/stable/installation.html) is a popular tool used for converting Python applications into standalone executables. It allows you to package your Python scripts into a single executable file, which can be run on a target machine without requiring the Python interpreter to be installed.
[Streamlit](https://docs.streamlit.io/library/get-started) is an open-source Python library used for creating web applications quickly and easily. It's designed for data scientists and engineers who want to turn data scripts into shareable web apps with minimal effort.
We use PyInstaller to package the flow and Streamlit to create the custom web app. Before distributing the flow, make sure both are installed.
## Build a flow as executable format
Note that all dependent connections must be created before building as executable.
```bash
# create connection if not created before
pf connection create --file ../../../examples/connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as executable format:
```bash
pf flow build --source <path-to-your-flow-folder> --output <your-output-dir> --format executable
```
## Executable format folder structure
Exported files & its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files.
- connections: the folder contains yaml files to create all related connections.
- app.py: the entry file is included as the entry point for the bundled application.
- app.spec: the spec file tells PyInstaller how to process your script.
- main.py: it will start streamlit service and be called by the entry file.
- settings.json: a json file to store the settings of the executable application.
- build: a folder contains various log and working files.
- dist: a folder contains the executable application.
- README.md: Simple introduction of the files.
### A template script of the entry file
PyInstaller reads a spec file or Python script written by you. It analyzes your code to discover every other module and library your script needs in order to execute. Then it collects copies of all those files, including the active Python interpreter, and puts them with your script in a single folder, or optionally in a single executable file.
We provide a Python entry script named `app.py` as the entry point for the bundled app, which enables you to serve a flow folder as an endpoint.
```python
import os
import sys
from promptflow._cli._pf._connection import create_connection
from streamlit.web import cli as st_cli
from streamlit.runtime import exists
from main import start
def is_yaml_file(file_path):
_, file_extension = os.path.splitext(file_path)
return file_extension.lower() in ('.yaml', '.yml')
def create_connections(directory_path) -> None:
for root, dirs, files in os.walk(directory_path):
for file in files:
file_path = os.path.join(root, file)
if is_yaml_file(file_path):
create_connection(file_path)
if __name__ == "__main__":
create_connections(os.path.join(os.path.dirname(__file__), "connections"))
if exists():
start()
else:
main_script = os.path.join(os.path.dirname(__file__), "main.py")
sys.argv = ["streamlit", "run", main_script, "--global.developmentMode=false"]
st_cli.main(prog_name="streamlit")
```
### A template script of the spec file
The spec file tells PyInstaller how to process your script. It encodes the script names and most of the options you give to the pyinstaller command. The spec file is actually executable Python code. PyInstaller builds the app by executing the contents of the spec file.
To streamline this process, we offer an `app.spec` spec file that bundles the application into a single file. For additional information on spec files, you can refer to [Using Spec Files](https://pyinstaller.org/en/stable/spec-files.html). Please replace `streamlit_runtime_interpreter_path` with the path of the streamlit runtime interpreter in your environment.
```spec
# -*- mode: python ; coding: utf-8 -*-
from PyInstaller.utils.hooks import collect_data_files
from PyInstaller.utils.hooks import copy_metadata
datas = [('connections', 'connections'), ('flow', 'flow'), ('settings.json', '.'), ('main.py', '.'), ('{{streamlit_runtime_interpreter_path}}', './streamlit/runtime')]
datas += collect_data_files('streamlit')
datas += copy_metadata('streamlit')
datas += collect_data_files('keyrings.alt', include_py_files=True)
datas += copy_metadata('keyrings.alt')
datas += collect_data_files('streamlit_quill')
block_cipher = None
a = Analysis(
['app.py', 'main.py'],
pathex=[],
binaries=[],
datas=datas,
hiddenimports=['bs4'],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='app',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)
```
### The bundled application using Pyinstaller
Once you've built a flow as executable format following [Build a flow as executable format](#build-a-flow-as-executable-format), two folders named `build` and `dist` are created within your specified output directory, denoted as `<your-output-dir>`. The `build` folder houses various log and working files, while the `dist` folder contains the `app` executable application.
### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in the executable package.
Secrets in connections won't be exported directly. Instead, we export them as references to environment variables, which must be set on the target machine (for example, `OPEN_AI_CONNECTION_API_KEY` below) before the executable is launched:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
## Test the endpoint
Finally, you can distribute the bundled application `app` to other people. They can execute your program by double-clicking the executable file, e.g. `app.exe` on Windows, or by running the binary file, e.g. `app` on Linux.
The development server has a built-in web page they can use to test the flow by opening 'http://localhost:8501' in the browser. If the flow is served successfully, the process stays alive until it is killed manually.
To your users, the app is self-contained. They do not need to install any particular version of Python or any modules. They do not need to have Python installed at all.
**Note**: The executable generated is not cross-platform. One platform (e.g. Windows) packaged executable can't run on others (Mac, Linux).
## Known issues
1. Note that Python 3.10.0 contains a bug making it unsupportable by PyInstaller. PyInstaller will also not work with beta releases of Python 3.13.
## Next steps
- Try the example [here](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-deploy) | promptflow/docs/how-to-guides/deploy-a-flow/distribute-flow-as-executable-app.md/0 | {
"file_path": "promptflow/docs/how-to-guides/deploy-a-flow/distribute-flow-as-executable-app.md",
"repo_id": "promptflow",
"token_count": 2459
} | 1 |
# Use streaming endpoints deployed from prompt flow
In prompt flow, you can [deploy flow as REST endpoint](./deploy-a-flow/index.md) for real-time inference.
When consuming the endpoint by sending a request, the default behavior is that the online endpoint will keep waiting until the whole response is ready, and then send it back to the client. This can cause a long delay for the client and a poor user experience.
To avoid this, you can use streaming when you consume the endpoints. Once streaming is enabled, you don't have to wait for the whole response to be ready. Instead, the server sends back the response in chunks as they are generated. The client can then display the response progressively, with less waiting time and more interactivity.
This article will describe the scope of streaming, how streaming works, and how to consume streaming endpoints.
## Create a streaming enabled flow
If you want to use the streaming mode, you need to create a flow that has a node that produces a string generator as the flow’s output. A string generator is an object that can return one string at a time when requested. You can use the following types of nodes to create a string generator:
- LLM node: This node uses a large language model to generate natural language responses based on the input.
```jinja
{# Sample prompt template for LLM node #}
system:
You are a helpful assistant.
user:
{{question}}
```
- Python tools node: This node allows you to write custom Python code that can yield string outputs. You can use this node to call external APIs or libraries that support streaming. For example, you can use this code to echo the input word by word:
```python
from promptflow import tool
# Sample code echo input by yield in Python tool node
@tool
def my_python_tool(paragraph: str) -> str:
yield "Echo: "
for word in paragraph.split():
yield word + " "
```
In this guide, we will use the ["Chat with Wikipedia"](https://github.com/microsoft/promptflow/tree/main/examples/flows/chat/chat-with-wikipedia) sample flow as an example. This flow processes the user’s question, searches Wikipedia for relevant articles, and answers the question with information from the articles. It uses streaming mode to show the progress of the answer generation.
![chat_wikipedia.png](../media/how-to-guides/how-to-enable-streaming-mode/chat_wikipedia_center.png)
## Deploy the flow as an online endpoint
To use the streaming mode, you need to deploy your flow as an online endpoint. This will allow you to send requests and receive responses from your flow in real time.
Follow [this guide](./deploy-a-flow/index.md) to deploy your flow as an online endpoint.
> [!NOTE]
>
> You can follow this document to deploy an [online endpoint](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/how-to-deploy-for-real-time-inference?view=azureml-api-2).
> Please deploy with runtime environment version later than version `20230816.v10`.
> You can check your runtime version and update runtime in the run time detail page.
## Understand the streaming process
When you have an online endpoint, the client and the server need to follow specific principles for [content negotiation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation) to utilize the streaming mode:
Content negotiation is like a conversation between the client and the server about the preferred format of the data they want to send and receive. It ensures effective communication and agreement on the format of the exchanged data.
To understand the streaming process, consider the following steps:
- First, the client constructs an HTTP request with the desired media type included in the `Accept` header. The media type tells the server what kind of data format the client expects. It's like the client saying, "Hey, I'm looking for a specific format for the data you'll send me. It could be JSON, text, or something else." For example, `application/json` indicates a preference for JSON data, `text/event-stream` indicates a desire for streaming data, and `*/*` means the client accepts any data format.
> [!NOTE]
>
> If a request lacks an `Accept` header or has empty `Accept` header, it implies that the client will accept any media type in response. The server treats it as `*/*`.
- Next, the server responds based on the media type specified in the `Accept` header. It's important to note that the client may request multiple media types in the `Accept` header, and the server must consider its capabilities and format priorities to determine the appropriate response.
- First, the server checks if `text/event-stream` is explicitly specified in the `Accept` header:
- For a stream-enabled flow, the server returns a response with a `Content-Type` of `text/event-stream`, indicating that the data is being streamed.
- For a non-stream-enabled flow, the server proceeds to check for other media types specified in the header.
- If `text/event-stream` is not specified, the server then checks if `application/json` or `*/*` is specified in the `Accept` header:
- In such cases, the server returns a response with a `Content-Type` of `application/json`, providing the data in JSON format.
- If the `Accept` header specifies other media types, such as `text/html`:
- The server returns a `424` response with a PromptFlow runtime error code `UserError` and a runtime HTTP status `406`, indicating that the server cannot fulfill the request with the requested data format.
    > Note: Please refer to [handle errors](#handle-errors) for details.
- Finally, the client checks the `Content-Type` response header. If it is set to `text/event-stream`, it indicates that the data is being streamed.
Let’s take a closer look at how the streaming process works. The response data in streaming mode follows the format of [server-sent events (SSE)](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events).
The overall process works as follows:
### 0. The client sends a message to the server.
```
POST https://<your-endpoint>.inference.ml.azure.com/score
Content-Type: application/json
Authorization: Bearer <key or token of your endpoint>
Accept: text/event-stream
{
"question": "Hello",
"chat_history": []
}
```
> [!NOTE]
>
> The `Accept` header is set to `text/event-stream` to request a stream response.
### 1. The server sends back the response in streaming mode.
```
HTTP/1.1 200 OK
Content-Type: text/event-stream; charset=utf-8
Connection: close
Transfer-Encoding: chunked
data: {"answer": ""}
data: {"answer": "Hello"}
data: {"answer": "!"}
data: {"answer": " How"}
data: {"answer": " can"}
data: {"answer": " I"}
data: {"answer": " assist"}
data: {"answer": " you"}
data: {"answer": " today"}
data: {"answer": " ?"}
data: {"answer": ""}
```
Note that the `Content-Type` is set to `text/event-stream; charset=utf-8`, indicating the response is an event stream.
The client should decode the response data as server-sent events and display them incrementally. The server will close the HTTP connection after all the data is sent.
Each response event is the delta to the previous event. It is recommended for the client to keep track of the merged data in memory and send them back to the server as chat history in the next request.
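For example, a minimal client-side sketch for accumulating the deltas is shown below. It assumes `client` is an `SSEClient` instance, as created in the Python sample later in this article, and that each event's `data` payload is a JSON object with an `answer` delta as shown above:

```python
import json

answer = ""
for event in client.events():
    delta = json.loads(event.data)
    answer += delta.get("answer", "")
# 'answer' now holds the merged response, ready to be stored as chat history.
```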
### 2. The client sends another chat message, along with the full chat history, to the server.
```
POST https://<your-endpoint>.inference.ml.azure.com/score
Content-Type: application/json
Authorization: Bearer <key or token of your endpoint>
Accept: text/event-stream
{
"question": "Glad to know you!",
"chat_history": [
{
"inputs": {
"question": "Hello"
},
"outputs": {
"answer": "Hello! How can I assist you today?"
}
}
]
}
```
### 3. The server sends back the answer in streaming mode.
```
HTTP/1.1 200 OK
Content-Type: text/event-stream; charset=utf-8
Connection: close
Transfer-Encoding: chunked
data: {"answer": ""}
data: {"answer": "Nice"}
data: {"answer": " to"}
data: {"answer": " know"}
data: {"answer": " you"}
data: {"answer": " too"}
data: {"answer": "!"}
data: {"answer": " Is"}
data: {"answer": " there"}
data: {"answer": " anything"}
data: {"answer": " I"}
data: {"answer": " can"}
data: {"answer": " help"}
data: {"answer": " you"}
data: {"answer": " with"}
data: {"answer": "?"}
data: {"answer": ""}
```
### 4. The chat continues in a similar way.
## Handle errors
The client should check the HTTP response code first. See [this table](https://learn.microsoft.com/azure/machine-learning/how-to-troubleshoot-online-endpoints?view=azureml-api-2&tabs=cli#http-status-codes) for common error codes returned by online endpoints.
If the response code is "424 Model Error", it means that the error is caused by the model’s code. The error response from a PromptFlow model always follows this format:
```json
{
"error": {
"code": "UserError",
"message": "Media type text/event-stream in Accept header is not acceptable. Supported media type(s) - application/json",
}
}
```
* It is always a JSON dictionary with only one key "error" defined.
* The value for "error" is a dictionary, containing "code", "message".
* "code" defines the error category. Currently, it may be "UserError" for bad user inputs and "SystemError" for errors inside the service.
* "message" is a description of the error. It can be displayed to the end user.
## How to consume the server-sent events
### Consume using Python
In this sample usage, we are using the `SSEClient` class. This class is not a built-in Python class and needs to be installed separately. You can install it via pip:
```bash
pip install sseclient-py
```
A sample usage would look like:
```python
import requests
from sseclient import SSEClient
from requests.exceptions import HTTPError

try:
    response = requests.post(url, json=body, headers=headers, stream=True)
    response.raise_for_status()
    content_type = response.headers.get('Content-Type', '')
    if "text/event-stream" in content_type:
        client = SSEClient(response)
        for event in client.events():
            # Handle each event, e.g. print the delta to stdout
            print(event.data)
    else:
        # Handle the plain json response
        print(response.json())
except HTTPError as e:
    # Handle exceptions, e.g. log the error
    print(e)
```
### Consume using JavaScript
There are several libraries to consume server-sent events in JavaScript. Here is [one of them as an example](https://www.npmjs.com/package/sse.js?activeTab=code).
## A sample chat app using Python
Here is a sample chat app written in Python.
(Click [here](../media/how-to-guides/how-to-enable-streaming-mode/scripts/chat_app.py) to view the source code.)
![chat_app](../media/how-to-guides/how-to-enable-streaming-mode/chat_app.gif)
## Advance usage - hybrid stream and non-stream flow output
Sometimes, you may want to get both stream and non-stream results from a flow output. For example, in the “Chat with Wikipedia” flow, you may want to get not only the LLM's answer, but also the list of URLs that the flow searched. To do this, you need to modify the flow to output a combination of the streamed LLM answer and the non-streamed URL list.
In the sample "Chat With Wikipedia" flow, the output is connected to the LLM node `augmented_chat`. To add the URL list to the output, you need to add an output field with the name `url` and the value `${get_wiki_url.output}`.
![chat_wikipedia_dual_output_center.png](../media/how-to-guides/how-to-enable-streaming-mode/chat_wikipedia_dual_output_center.png)
The output of the flow will be a non-stream field as the base and a stream field as the delta. A client-side merging sketch is shown below, followed by an example of the request and response.
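The following illustrative sketch merges such hybrid output on the client side. It assumes events are decoded as in the consumption samples earlier in this article:

```python
import json

def merge_hybrid_events(events) -> dict:
    """Merge hybrid flow output: keep non-stream fields as-is, concatenate 'answer' deltas."""
    result = {"answer": ""}
    for event in events:
        chunk = json.loads(event.data)
        for key, value in chunk.items():
            if key == "answer":
                result["answer"] += value
            else:
                result[key] = value
    return result
```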
### 0. The client sends a message to the server.
```
POST https://<your-endpoint>.inference.ml.azure.com/score
Content-Type: application/json
Authorization: Bearer <key or token of your endpoint>
Accept: text/event-stream
{
"question": "When was ChatGPT launched?",
"chat_history": []
}
```
### 1. The server sends back the answer in streaming mode.
```
HTTP/1.1 200 OK
Content-Type: text/event-stream; charset=utf-8
Connection: close
Transfer-Encoding: chunked
data: {"url": ["https://en.wikipedia.org/w/index.php?search=ChatGPT", "https://en.wikipedia.org/w/index.php?search=GPT-4"]}
data: {"answer": ""}
data: {"answer": "Chat"}
data: {"answer": "G"}
data: {"answer": "PT"}
data: {"answer": " was"}
data: {"answer": " launched"}
data: {"answer": " on"}
data: {"answer": " November"}
data: {"answer": " "}
data: {"answer": "30"}
data: {"answer": ","}
data: {"answer": " "}
data: {"answer": "202"}
data: {"answer": "2"}
data: {"answer": "."}
data: {"answer": " \n\n"}
...
data: {"answer": "PT"}
data: {"answer": ""}
```
### 2. The client sends another chat message, along with the full chat history, to the server.
```
POST https://<your-endpoint>.inference.ml.azure.com/score
Content-Type: application/json
Authorization: Bearer <key or token of your endpoint>
Accept: text/event-stream
{
"question": "When did OpenAI announce GPT-4? How long is it between these two milestones?",
"chat_history": [
{
"inputs": {
"question": "When was ChatGPT launched?"
},
"outputs": {
"url": [
"https://en.wikipedia.org/w/index.php?search=ChatGPT",
"https://en.wikipedia.org/w/index.php?search=GPT-4"
],
"answer": "ChatGPT was launched on November 30, 2022. \n\nSOURCES: https://en.wikipedia.org/w/index.php?search=ChatGPT"
}
}
]
}
```
### 3. The server sends back the answer in streaming mode.
```
HTTP/1.1 200 OK
Content-Type: text/event-stream; charset=utf-8
Connection: close
Transfer-Encoding: chunked
data: {"url": ["https://en.wikipedia.org/w/index.php?search=Generative pre-trained transformer ", "https://en.wikipedia.org/w/index.php?search=Microsoft "]}
data: {"answer": ""}
data: {"answer": "Open"}
data: {"answer": "AI"}
data: {"answer": " released"}
data: {"answer": " G"}
data: {"answer": "PT"}
data: {"answer": "-"}
data: {"answer": "4"}
data: {"answer": " in"}
data: {"answer": " March"}
data: {"answer": " "}
data: {"answer": "202"}
data: {"answer": "3"}
data: {"answer": "."}
data: {"answer": " Chat"}
data: {"answer": "G"}
data: {"answer": "PT"}
data: {"answer": " was"}
data: {"answer": " launched"}
data: {"answer": " on"}
data: {"answer": " November"}
data: {"answer": " "}
data: {"answer": "30"}
data: {"answer": ","}
data: {"answer": " "}
data: {"answer": "202"}
data: {"answer": "2"}
data: {"answer": "."}
data: {"answer": " The"}
data: {"answer": " time"}
data: {"answer": " between"}
data: {"answer": " these"}
data: {"answer": " two"}
data: {"answer": " milestones"}
data: {"answer": " is"}
data: {"answer": " approximately"}
data: {"answer": " "}
data: {"answer": "3"}
data: {"answer": " months"}
data: {"answer": ".\n\n"}
...
data: {"answer": "Chat"}
data: {"answer": "G"}
data: {"answer": "PT"}
data: {"answer": ""}
```
| promptflow/docs/how-to-guides/enable-streaming-mode.md/0 | {
"file_path": "promptflow/docs/how-to-guides/enable-streaming-mode.md",
"repo_id": "promptflow",
"token_count": 4715
} | 2 |
# Azure AI Language
Azure AI Language provides task-oriented, optimized pre-trained language models to effectively understand documents and conversations. This Prompt flow tool is a wrapper for various Azure AI Language APIs. The current list of supported capabilities is as follows:
| Name | Description |
|-------------------------------------------|-------------------------------------------------------|
| Abstractive Summarization | Generate abstractive summaries from documents. |
| Extractive Summarization | Extract summaries from documents. |
| Conversation Summarization | Summarize conversations. |
| Entity Recognition | Recognize and categorize entities in documents. |
| Key Phrase Extraction | Extract key phrases from documents. |
| Language Detection | Detect the language of documents. |
| PII Entity Recognition | Recognize and redact PII entities in documents. |
| Sentiment Analysis | Analyze the sentiment of documents. |
| Conversational Language Understanding | Predict intents and entities from user's utterances. |
| Translator | Translate documents. |
## Requirements
- For AzureML users:
follow this [wiki](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/how-to-custom-tool-package-creation-and-usage?view=azureml-api-2#prepare-runtime), starting from `Prepare runtime`. Note that the PyPI package name is `promptflow-azure-ai-language`.
- For local users:
```
pip install promptflow-azure-ai-language
```
## Prerequisites
The tool calls APIs from Azure AI Language. To use it, you must create a connection to an [Azure AI Language resource](https://learn.microsoft.com/en-us/azure/ai-services/language-service/). Create a Language resource first, if necessary.
- In Prompt flow, add a new `CustomConnection`.
- Under the `secrets` field, specify the resource's API key: `api_key: <Azure AI Language Resource api key>`
- Under the `configs` field, specify the resource's endpoint: `endpoint: <Azure AI Language Resource endpoint>`
To use the `Translator` tool, you must set up an additional connection to an [Azure AI Translator resource](https://azure.microsoft.com/en-us/products/ai-services/ai-translator). [Create a Translator resource](https://learn.microsoft.com/en-us/azure/ai-services/translator/create-translator-resource) first, if necessary.
- In Prompt flow, add a new `CustomConnection`.
- Under the `secrets` field, specify the resource's API key: `api_key: <Azure AI Translator Resource api key>`
- Under the `configs` field, specify the resource's endpoint: `endpoint: <Azure AI Translator Resource endpoint>`
- If your Translator Resource is regional and non-global, specify its region under `configs` as well: `region: <Azure AI Translator Resource region>`
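If you prefer to create these connections programmatically, a minimal sketch with the local Prompt flow SDK could look like the following. The connection name is an assumption; substitute your own values:

```python
from promptflow import PFClient
from promptflow.entities import CustomConnection

# Illustrative only: the name and placeholder values are assumptions.
connection = CustomConnection(
    name="azure_ai_language_connection",
    secrets={"api_key": "<Azure AI Language Resource api key>"},
    configs={"endpoint": "<Azure AI Language Resource endpoint>"},
)
PFClient().connections.create_or_update(connection)
```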
## Inputs
The tool accepts the following inputs:
- **Abstractive Summarization**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| language | string | The ISO 639-1 code for the language of the input. | Yes |
| text | string | The input text. | Yes |
| query | string | The query used to structure summarization. | Yes |
| summary_length | string (enum) | The desired summary length. Enum values are `short`, `medium`, and `long`. | No |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **Extractive Summarization**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| language | string | The ISO 639-1 code for the language of the input. | Yes |
| text | string | The input text. | Yes |
| query | string | The query used to structure summarization. | Yes |
| sentence_count | int | The desired number of output summary sentences. Default value is `3`. | No |
| sort_by | string (enum) | The sorting criteria for extractive summarization results. Enum values are `Offset` to sort results in order of appearance in the text and `Rank` to sort results in order of importance (i.e. rank score) according to model. Default value is `Offset`. | No |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **Conversation Summarization**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| language | string | The ISO 639-1 code for the language of the input. | Yes |
| text | string | The input text. Text should be of the following form: `<speaker id>: <speaker text> \n <speaker id>: <speaker text> \n ...` | Yes |
| modality | string (enum) | The modality of the input text. Enum values are `text` for input from a text source, and `transcript` for input from a transcript source. | Yes |
| summary_aspect | string (enum) | The desired summary "aspect" to obtain. Enum values are `chapterTitle` to obtain the chapter title of any conversation, `issue` to obtain the summary of issues in transcripts of web chats and service calls between customer-service agents and customers, `narrative` to obtain the generic summary of any conversation, `resolution` to obtain the summary of resolutions in transcripts of web chats and service calls between customer-service agents and customers, `recap` to obtain a general summary, and `follow-up tasks` to obtain a summary of follow-up or action items. | Yes |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **Entity Recognition**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| language | string | The ISO 639-1 code for the language of the input. | Yes |
| text | string | The input text. | Yes |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **Key Phrase Extraction**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| language | string | The ISO 639-1 code for the language of the input. | Yes |
| text | string | The input text. | Yes |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **Language Detection**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| text | string | The input text. | Yes |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **PII Entity Recognition**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| language | string | The ISO 639-1 code for the language of the input. | Yes |
| text | string | The input text. | Yes |
| domain | string (enum) | The PII domain used for PII Entity Recognition. Enum values are `none` for no domain, or `phi` to indicate that entities in the Personal Health domain should be redacted. Default value is `none`. | No |
| categories | list[string] | Describes the PII categories to return. Default value is `[]`. | No |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **Sentiment Analysis**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| language | string | The ISO 639-1 code for the language of the input. | Yes |
| text | string | The input text. | Yes |
| opinion_mining | bool | Should opinion mining be enabled. Default value is `False`. | No |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **Conversational Language Understanding**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Language resource. | Yes |
| language | string | The ISO 639-1 code for the language of the input. | Yes |
| utterances | string | A single user utterance or a json array of user utterances. | Yes |
| project_name | string | The Conversational Language Understanding project to be called. | Yes |
| deployment_name | string | The Conversational Language Understanding project deployment to be called. | Yes |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
- **Translator**:
| Name | Type | Description | Required |
|--------------------|------------------|-------------|----------|
| connection | CustomConnection | The created connection to an Azure AI Translator resource. | Yes |
| text | string | The input text. | Yes |
| to | list[string] | The languages to translate the input text to. | Yes |
| source_language | string | The language of the input text. | No |
| parse_response | bool | Should the raw API json output be parsed. Default value is `False`. | No |
## Outputs
If the input parameter `parse_response` is set to `False` (default value), the raw API json output will be returned as a string. Refer to the [REST API reference](https://learn.microsoft.com/en-us/rest/api/language/) for details on API output. For Conversational Language Understanding, the output will be a list of raw API json responses, one response for each user utterance in the input.
When `parse_response` is set to `True`, the tool will parse API output as follows:
| Name | Type | Description |
|-------------------------------------------------------------|--------|---------------------|
| Abstractive Summarization | string | Abstractive summary. |
| Extractive Summarization | list[string] | Extracted summary sentence strings. |
| Conversation Summarization | string | Conversation summary based on `summary_aspect`. |
| Entity Recognition | dict[string, string] | Recognized entities, where keys are entity names and values are entity categories. |
| Key Phrase Extraction | list[string] | Extracted key phrases as strings. |
| Language Detection | string | Detected language's ISO 639-1 code. |
| PII Entity Recognition | string | Input `text` with PII entities redacted. |
| Sentiment Analysis | string | Analyzed sentiment: `positive`, `neutral`, or `negative`. |
| Conversational Language Understanding | list[dict[string, string]] | List of user utterances and associated intents. |
| Translator | dict[string, string] | Translated text, where keys are the translated languages and values are the translated texts. |
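For example, a Translator call with `to` set to `["fr", "de"]` and `parse_response` set to `True` would yield a dictionary keyed by target language code. A minimal sketch (the translated texts below are made-up values):
```python
# Illustrative parsed Translator output; keys are target language codes.
parsed_translation = {
    "fr": "Bonjour le monde",  # hypothetical French translation
    "de": "Hallo Welt",        # hypothetical German translation
}
```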
| promptflow/docs/integrations/tools/azure-ai-language-tool.md/0 | {
"file_path": "promptflow/docs/integrations/tools/azure-ai-language-tool.md",
"repo_id": "promptflow",
"token_count": 4513
} | 3 |
# SerpAPI
## Introduction
The SerpAPI tool is a Python wrapper around the [SerpAPI Google Search Engine Results API](https://serpapi.com/search-api) and the [SerpAPI Bing Search Engine Results API](https://serpapi.com/bing-search-api).
You can use the tool to retrieve search results from a number of different search engines, including Google and Bing, and you can specify a range of search parameters, such as the search query, location, device type, and more.
## Prerequisite
Sign up at [SERP API homepage](https://serpapi.com/)
## Connection
Connection is the model used to establish connections with Serp API.
| Type | Name | API KEY |
|-------------|----------|----------|
| Serp | Required | Required |
_The **API Key** can be found on the SerpAPI account dashboard._
## Inputs
The **serp api** tool supports the following parameters:
| Name | Type | Description | Required |
|----------|---------|---------------------------------------------------------------|----------|
| query | string | The search query to be executed. | Yes |
| engine | string | The search engine to use for the search. Default is 'google'. | Yes |
| num      | integer | The number of search results to return. Default is 10.       | No       |
| location | string | The geographic location to execute the search from. | No |
| safe | string | The safe search mode to use for the search. Default is 'off'. | No |
## Outputs
The JSON representation of the SerpAPI query results.
| Engine | Return Type | Output |
|----------|-------------|-------------------------------------------------------|
| google | json | [Sample](https://serpapi.com/search-api#api-examples) |
| bing | json | [Sample](https://serpapi.com/bing-search-api) |
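As a minimal sketch of what such a query looks like, the snippet below calls the `google-search-results` Python package directly; all query values are illustrative and `<your_api_key>` is a placeholder for the key from your SerpAPI dashboard:
```python
# Minimal sketch using the google-search-results package
# (pip install google-search-results). All values below are illustrative.
from serpapi import GoogleSearch
search = GoogleSearch(
    {
        "q": "coffee",                # the search query to be executed
        "engine": "google",           # the search engine to use
        "num": 10,                    # number of search results to return
        "location": "Austin, Texas",  # geographic location for the search
        "api_key": "<your_api_key>",  # key from the SerpAPI dashboard
    }
)
results = search.get_dict()  # raw json response as a Python dict
```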
| promptflow/docs/reference/tools-reference/serp-api-tool.md/0 | {
"file_path": "promptflow/docs/reference/tools-reference/serp-api-tool.md",
"repo_id": "promptflow",
"token_count": 683
} | 4 |
from openai import OpenAIError
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
openai_error_code_ref_message = "Error reference: https://platform.openai.com/docs/guides/error-codes/api-errors"
def to_openai_error_message(e: Exception) -> str:
ex_type = type(e).__name__
if str(e) == "<empty message>":
msg = "The api key is invalid or revoked. " \
"You can correct or regenerate the api key of your connection."
return f"OpenAI API hits {ex_type}: {msg}"
# for models that do not support the `functions` parameter.
elif "Unrecognized request argument supplied: functions" in str(e):
msg = "Current model does not support the `functions` parameter. If you are using openai connection, then " \
"please use gpt-3.5-turbo, gpt-4, gpt-4-32k, gpt-3.5-turbo-0613 or gpt-4-0613. You can refer to " \
"https://platform.openai.com/docs/guides/gpt/function-calling. If you are using azure openai " \
"connection, then please first go to your Azure OpenAI resource, deploy model 'gpt-35-turbo' or " \
"'gpt-4' with version 0613, then go to prompt flow connection page, upgrade connection api version to " \
"'2023-07-01-preview'. You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling."
return f"OpenAI API hits {ex_type}: {msg}"
elif "The completion operation does not work with the specified model" in str(e) or \
"logprobs, best_of and echo parameters are not available" in str(e):
msg = "The completion operation does not work with the current model. " \
"Completion API is a legacy api and is going to be deprecated soon. " \
"Please change to use Chat API for current model. " \
"You could refer to guideline at https://aka.ms/pfdoc/chat-prompt " \
"or view the samples in our gallery that contain 'Chat' in the name."
return f"OpenAI API hits {ex_type}: {msg}"
elif "Invalid content type. image_url is only supported by certain models" in str(e):
msg = "Current model does not support the image input. If you are using openai connection, then please use " \
"gpt-4-vision-preview. You can refer to https://platform.openai.com/docs/guides/vision." \
"If you are using azure openai connection, then please first go to your Azure OpenAI resource, " \
"create a GPT-4 Turbo with Vision deployment by selecting model name: \"gpt-4\" and "\
"model version \"vision-preview\". You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/gpt-with-vision"
return f"OpenAI API hits {ex_type}: {msg}"
elif ("\'response_format\' of type" in str(e) and "is not supported with this model." in str(e))\
or ("Additional properties are not allowed" in str(e) and "unexpected) - \'response_format\'" in str(e)):
msg = "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}. " \
"The value associated with the type key should be either 'text' or 'json_object' " \
"If you are using openai connection, you can only set response_format to { \"type\": \"json_object\" } " \
"when calling gpt-3.5-turbo-1106 or gpt-4-1106-preview to enable JSON mode. You can refer to " \
"https://platform.openai.com/docs/guides/text-generation/json-mode. If you are using azure openai " \
"connection, then please first go to your Azure OpenAI resource, deploy model 'gpt-35-turbo-1106' or " \
"'gpt-4-1106-preview'. You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/json-mode?tabs=python."
return f"OpenAI API hits {ex_type}: {msg}"
else:
return f"OpenAI API hits {ex_type}: {str(e)} [{openai_error_code_ref_message}]"
class WrappedOpenAIError(UserErrorException):
"""Refine error messages on top of native openai errors."""
def __init__(self, ex: OpenAIError, **kwargs):
self._ex = ex
super().__init__(target=ErrorTarget.TOOL, **kwargs)
@property
def message(self):
return str(to_openai_error_message(self._ex))
@property
def error_codes(self):
"""The hierarchy of the error codes.
We follow the "Microsoft REST API Guidelines" to define error codes in a hierarchy style.
See the below link for details:
https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses
This list will be converted into an error code hierarchy by the prompt flow framework.
For this case, it will be converted into a data structure that equivalent to:
{
"code": "UserError",
"innerError": {
"code": "OpenAIError",
"innerError": {
"code": self._ex.__class__.__name__,
"innerError": None
}
}
}
"""
return ["UserError", "OpenAIError", self._ex.__class__.__name__]
class ExceedMaxRetryTimes(WrappedOpenAIError):
"""Base exception raised when retry exceeds max times."""
@property
def message(self):
return "Exceed max retry times. " + super().message
class ToolValidationError(UserErrorException):
"""Base exception raised when failed to validate tool."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class LLMError(UserErrorException):
"""Base exception raised when failed to call openai api with non-OpenAIError."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class JinjaTemplateError(ToolValidationError):
"""Base exception raised when failed to render jinja template."""
pass
class ChatAPIInvalidRole(ToolValidationError):
"""Base exception raised when failed to validate chat api role."""
pass
class ChatAPIFunctionRoleInvalidFormat(ToolValidationError):
"""Base exception raised when failed to validate chat api function role format."""
pass
class ChatAPIInvalidFunctions(ToolValidationError):
"""Base exception raised when failed to validate functions when call chat api."""
pass
class FunctionCallNotSupportedInStreamMode(ToolValidationError):
"""Base exception raised when use functions parameter in stream mode when call chat api."""
pass
class InvalidConnectionType(ToolValidationError):
"""Base exception raised when failed to pass invalid connection type."""
pass
class SerpAPISystemError(SystemErrorException):
"""Base exception raised when failed to call serp api with system error."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class SerpAPIUserError(UserErrorException):
"""Base exception raised when failed to call serp api with user error."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class OpenModelLLMOnlineEndpointError(UserErrorException):
"""Base exception raised when the call to an online endpoint failed."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class OpenModelLLMUserError(UserErrorException):
"""Base exception raised when the call to Open Model LLM failed with a user error."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class OpenModelLLMKeyValidationError(ToolValidationError):
"""Base exception raised when failed to validate functions when call chat api."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
class AzureContentSafetyInputValueError(UserErrorException):
"""Base exception raised when the input type of Azure Content Safety is invalid."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class AzureContentSafetySystemError(SystemErrorException):
"""Base exception raised when failed to call Azure Content Safety api with system error."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
| promptflow/src/promptflow-tools/promptflow/tools/exception.py/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/exception.py",
"repo_id": "promptflow",
"token_count": 3068
} | 5 |
DEFAULT_SUBSCRIPTION_ID="your-subscription-id"
DEFAULT_RESOURCE_GROUP_NAME="your-resource-group-name"
DEFAULT_WORKSPACE_NAME="your-workspace-name"
DEFAULT_RUNTIME_NAME="test-runtime-ci"
PROMPT_FLOW_TEST_MODE="replay"
| promptflow/src/promptflow/.env.example/0 | {
"file_path": "promptflow/src/promptflow/.env.example",
"repo_id": "promptflow",
"token_count": 85
} | 6 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import importlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import webbrowser
from pathlib import Path
from promptflow._cli._params import (
add_param_config,
add_param_entry,
add_param_environment_variables,
add_param_flow_display_name,
add_param_function,
add_param_inputs,
add_param_prompt_template,
add_param_source,
add_param_yes,
add_parser_build,
base_params,
)
from promptflow._cli._pf._init_entry_generators import (
AzureOpenAIConnectionGenerator,
ChatFlowDAGGenerator,
FlowDAGGenerator,
OpenAIConnectionGenerator,
StreamlitFileReplicator,
ToolMetaGenerator,
ToolPyGenerator,
copy_extra_files,
)
from promptflow._cli._pf._run import exception_handler
from promptflow._cli._utils import _copy_to_flow, activate_action, confirm, inject_sys_path, list_of_dict_to_dict
from promptflow._constants import FlowLanguage
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, ConnectionProvider
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.operations._flow_operations import FlowOperations
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import ErrorTarget, UserErrorException
DEFAULT_CONNECTION = "open_ai_connection"
DEFAULT_DEPLOYMENT = "gpt-35-turbo"
logger = get_cli_sdk_logger()
def add_flow_parser(subparsers):
"""Add flow parser to the pf subparsers."""
flow_parser = subparsers.add_parser(
"flow",
description="Manage flows for promptflow.",
help="pf flow",
)
flow_subparsers = flow_parser.add_subparsers()
add_parser_init_flow(flow_subparsers)
add_parser_test_flow(flow_subparsers)
add_parser_serve_flow(flow_subparsers)
add_parser_build(flow_subparsers, "flow")
add_parser_validate_flow(flow_subparsers)
flow_parser.set_defaults(action="flow")
def dispatch_flow_commands(args: argparse.Namespace):
if args.sub_action == "init":
init_flow(args)
elif args.sub_action == "test":
test_flow(args)
elif args.sub_action == "serve":
serve_flow(args)
elif args.sub_action == "build":
build_flow(args)
elif args.sub_action == "validate":
validate_flow(args)
def add_parser_init_flow(subparsers):
"""Add flow create parser to the pf flow subparsers."""
epilog = """
Examples:
# Creating a flow folder with code/prompts and yaml definitions of the flow:
pf flow init --flow my-awesome-flow
# Creating an eval prompt flow:
pf flow init --flow my-awesome-flow --type evaluation
# Creating a flow in existing folder
pf flow init --flow intent_copilot --entry intent.py --function extract_intent --prompt-template prompt_template=tpl.jinja2
""" # noqa: E501
add_param_type = lambda parser: parser.add_argument( # noqa: E731
"--type",
type=str,
choices=["standard", "evaluation", "chat"],
help="The initialized flow type.",
default="standard",
)
add_param_connection = lambda parser: parser.add_argument( # noqa: E731
"--connection", type=str, help=argparse.SUPPRESS
)
add_param_deployment = lambda parser: parser.add_argument( # noqa: E731
"--deployment", type=str, help=argparse.SUPPRESS
)
add_params = [
add_param_type,
add_param_yes,
add_param_flow_display_name,
add_param_entry,
add_param_function,
add_param_prompt_template,
add_param_connection,
add_param_deployment,
] + base_params
activate_action(
name="init",
description="Creating a flow folder with code/prompts and yaml definitions of the flow.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Initialize a prompt flow directory.",
action_param_name="sub_action",
)
def add_parser_serve_flow(subparsers):
"""Add flow serve parser to the pf flow subparsers."""
epilog = """
Examples:
# Serve flow as an endpoint:
pf flow serve --source <path_to_flow>
# Serve flow as an endpoint with specific port and host:
pf flow serve --source <path_to_flow> --port 8080 --host localhost --environment-variables key1="`${my_connection.api_key}" key2="value2"
# Serve flow without opening browser:
pf flow serve --source <path_to_flow> --skip-open-browser
""" # noqa: E501
add_param_port = lambda parser: parser.add_argument( # noqa: E731
"--port", type=int, default=8080, help="The port on which endpoint to run."
)
add_param_host = lambda parser: parser.add_argument( # noqa: E731
"--host", type=str, default="localhost", help="The host of endpoint."
)
add_param_static_folder = lambda parser: parser.add_argument( # noqa: E731
"--static_folder", type=str, help=argparse.SUPPRESS
)
add_param_skip_browser = lambda parser: parser.add_argument( # noqa: E731
"--skip-open-browser", action="store_true", default=False, help="Skip open browser for flow serving."
)
activate_action(
name="serve",
description="Serving a flow as an endpoint.",
epilog=epilog,
add_params=[
add_param_source,
add_param_port,
add_param_host,
add_param_static_folder,
add_param_environment_variables,
add_param_config,
add_param_skip_browser,
]
+ base_params,
subparsers=subparsers,
help_message="Serving a flow as an endpoint.",
action_param_name="sub_action",
)
def add_parser_validate_flow(subparsers):
"""Add flow validate parser to the pf flow subparsers."""
epilog = """
Examples:
# Validate flow
pf flow validate --source <path_to_flow>
""" # noqa: E501
activate_action(
name="validate",
description="Validate a flow and generate flow.tools.json for the flow.",
epilog=epilog,
add_params=[
add_param_source,
]
+ base_params,
subparsers=subparsers,
help_message="Validate a flow. Will raise error if the flow is not valid.",
action_param_name="sub_action",
)
def add_parser_test_flow(subparsers):
"""Add flow test parser to the pf flow subparsers."""
epilog = """
Examples:
# Test the flow:
pf flow test --flow my-awesome-flow
# Test the flow with inputs:
pf flow test --flow my-awesome-flow --inputs key1=val1 key2=val2
# Test the flow with specified variant node:
pf flow test --flow my-awesome-flow --variant ${node_name.variant_name}
# Test the single node in the flow:
pf flow test --flow my-awesome-flow --node node_name
# Chat in the flow:
pf flow test --flow my-awesome-flow --node node_name --interactive
""" # noqa: E501
add_param_flow = lambda parser: parser.add_argument( # noqa: E731
"--flow", type=str, required=True, help="the flow directory to test."
)
add_param_node = lambda parser: parser.add_argument( # noqa: E731
"--node", type=str, help="the node name in the flow need to be tested."
)
add_param_variant = lambda parser: parser.add_argument( # noqa: E731
"--variant", type=str, help="Node & variant name in format of ${node_name.variant_name}."
)
add_param_interactive = lambda parser: parser.add_argument( # noqa: E731
"--interactive", action="store_true", help="start a interactive chat session for chat flow."
)
add_param_multi_modal = lambda parser: parser.add_argument( # noqa: E731
"--multi-modal", action="store_true", help=argparse.SUPPRESS
)
add_param_ui = lambda parser: parser.add_argument("--ui", action="store_true", help=argparse.SUPPRESS) # noqa: E731
add_param_input = lambda parser: parser.add_argument("--input", type=str, help=argparse.SUPPRESS) # noqa: E731
add_param_detail = lambda parser: parser.add_argument( # noqa: E731
"--detail", type=str, default=None, required=False, help=argparse.SUPPRESS
)
add_param_experiment = lambda parser: parser.add_argument( # noqa: E731
"--experiment", type=str, help="the experiment template path of flow."
)
add_params = [
add_param_flow,
add_param_node,
add_param_variant,
add_param_interactive,
add_param_input,
add_param_inputs,
add_param_environment_variables,
add_param_multi_modal,
add_param_ui,
add_param_config,
add_param_detail,
] + base_params
if Configuration.get_instance().is_internal_features_enabled():
add_params.append(add_param_experiment)
activate_action(
name="test",
description="Test the flow.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Test the prompt flow or flow node.",
action_param_name="sub_action",
)
def init_flow(args):
if any([args.entry, args.prompt_template]):
print("Creating flow from existing folder...")
prompt_tpl = {}
if args.prompt_template:
for _dct in args.prompt_template:
prompt_tpl.update(**_dct)
_init_existing_flow(args.flow, args.entry, args.function, prompt_tpl)
else:
# Create an example flow
print("Creating flow from scratch...")
_init_flow_by_template(args.flow, args.type, args.yes, args.connection, args.deployment)
def _init_existing_flow(flow_name, entry=None, function=None, prompt_params: dict = None):
flow_path = Path(flow_name).resolve()
if not function:
logger.error("--function must be specified when --entry is specified.")
return
if not flow_path.exists():
logger.error(f"{flow_path.resolve()} must exist when --entry specified.")
return
print(f"Change working directory to .. {flow_path.resolve()}")
os.chdir(flow_path)
entry = Path(entry).resolve()
if not entry.exists():
logger.error(f"{entry} must exist.")
return
with inject_sys_path(flow_path):
# import function object
function_obj = getattr(importlib.import_module(entry.stem), function)
# Create tool.py
tool_py = f"{function}_tool.py"
python_tool = ToolPyGenerator(entry, function, function_obj)
tools = ToolMetaGenerator(tool_py, function, function_obj, prompt_params)
python_tool_inputs = [arg.name for arg in python_tool.tool_arg_list]
for tool_input in tools.prompt_params.keys():
if tool_input not in python_tool_inputs:
error = ValueError(f"Template parameter {tool_input} doesn't find in python function arguments.")
raise UserErrorException(target=ErrorTarget.CONTROL_PLANE_SDK, message=str(error), error=error)
python_tool.generate_to_file(tool_py)
# Create .promptflow and flow.tools.json
meta_dir = flow_path / PROMPT_FLOW_DIR_NAME
meta_dir.mkdir(parents=True, exist_ok=True)
tools.generate_to_file(meta_dir / "flow.tools.json")
# Create flow.dag.yaml
FlowDAGGenerator(tool_py, function, function_obj, prompt_params).generate_to_file("flow.dag.yaml")
copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"])
print(f"Done. Generated flow in folder: {flow_path.resolve()}.")
def _init_chat_flow(flow_name, flow_path, connection=None, deployment=None):
from promptflow._sdk._configuration import Configuration
example_flow_path = Path(__file__).parent.parent / "data" / "chat_flow" / "flow_files"
for item in list(example_flow_path.iterdir()):
_copy_to_flow(flow_path=flow_path, source_file=item)
# Generate flow.dag.yaml to chat flow.
connection = connection or DEFAULT_CONNECTION
deployment = deployment or DEFAULT_DEPLOYMENT
ChatFlowDAGGenerator(connection=connection, deployment=deployment).generate_to_file(flow_path / "flow.dag.yaml")
# When customer not configure the remote connection provider, create connection yaml to chat flow.
is_local_connection = Configuration.get_instance().get_connection_provider() == ConnectionProvider.LOCAL
if is_local_connection:
OpenAIConnectionGenerator(connection=connection).generate_to_file(flow_path / "openai.yaml")
AzureOpenAIConnectionGenerator(connection=connection).generate_to_file(flow_path / "azure_openai.yaml")
copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"])
print(f"Done. Created chat flow folder: {flow_path.resolve()}.")
if is_local_connection:
print(
f"The generated chat flow is requiring a connection named {connection}, "
"please follow the steps in README.md to create if you haven't done that."
)
else:
print(
f"The generated chat flow is requiring a connection named {connection}, "
"please ensure it exists in workspace."
)
flow_test_command = f"pf flow test --flow {flow_name} --interactive"
print(f"You can execute this command to test the flow, {flow_test_command}")
def _init_standard_or_evaluation_flow(flow_name, flow_path, flow_type):
example_flow_path = Path(__file__).parent.parent / "data" / f"{flow_type}_flow"
for item in list(example_flow_path.iterdir()):
_copy_to_flow(flow_path=flow_path, source_file=item)
copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"])
print(f"Done. Created {flow_type} flow folder: {flow_path.resolve()}.")
flow_test_command = f"pf flow test --flow {flow_name} --input {os.path.join(flow_name, 'data.jsonl')}"
print(f"You can execute this command to test the flow, {flow_test_command}")
def _init_flow_by_template(flow_name, flow_type, overwrite=False, connection=None, deployment=None):
flow_path = Path(flow_name)
if flow_path.exists():
if not flow_path.is_dir():
logger.error(f"{flow_path.resolve()} is not a folder.")
return
answer = confirm(
"The flow folder already exists, do you want to create the flow in this existing folder?", overwrite
)
if not answer:
print("The 'pf init' command has been cancelled.")
return
flow_path.mkdir(parents=True, exist_ok=True)
if flow_type == "chat":
_init_chat_flow(flow_name=flow_name, flow_path=flow_path, connection=connection, deployment=deployment)
else:
_init_standard_or_evaluation_flow(flow_name=flow_name, flow_path=flow_path, flow_type=flow_type)
@exception_handler("Flow test")
def test_flow(args):
config = list_of_dict_to_dict(args.config)
pf_client = PFClient(config=config)
if args.environment_variables:
environment_variables = list_of_dict_to_dict(args.environment_variables)
else:
environment_variables = {}
inputs = _build_inputs_for_flow_test(args)
# Select different test mode
if Configuration.get_instance().is_internal_features_enabled() and args.experiment:
_test_flow_experiment(args, pf_client, inputs, environment_variables)
return
if args.multi_modal or args.ui:
_test_flow_multi_modal(args, pf_client)
return
if args.interactive:
_test_flow_interactive(args, pf_client, inputs, environment_variables)
return
_test_flow_standard(args, pf_client, inputs, environment_variables)
def _build_inputs_for_flow_test(args):
"""Build inputs from --input and --inputs for flow test."""
inputs = {}
if args.input:
from promptflow._utils.load_data import load_data
if args.input and not args.input.endswith(".jsonl"):
error = ValueError("Only support jsonl file as input.")
raise UserErrorException(
target=ErrorTarget.CONTROL_PLANE_SDK,
message=str(error),
error=error,
)
inputs = load_data(local_path=args.input)[0]
if args.inputs:
inputs.update(list_of_dict_to_dict(args.inputs))
return inputs
def _test_flow_multi_modal(args, pf_client):
"""Test flow with multi modality mode."""
from promptflow._sdk._load_functions import load_flow
with tempfile.TemporaryDirectory() as temp_dir:
flow = load_flow(args.flow)
script_path = [
os.path.join(temp_dir, "main.py"),
os.path.join(temp_dir, "utils.py"),
os.path.join(temp_dir, "logo.png"),
]
for script in script_path:
StreamlitFileReplicator(
flow_name=flow.display_name if flow.display_name else flow.name,
flow_dag_path=flow.flow_dag_path,
).generate_to_file(script)
main_script_path = os.path.join(temp_dir, "main.py")
logger.info("Start streamlit with main script generated at: %s", main_script_path)
pf_client.flows._chat_with_ui(script=main_script_path)
def _test_flow_interactive(args, pf_client, inputs, environment_variables):
"""Test flow with interactive mode."""
pf_client.flows._chat(
flow=args.flow,
inputs=inputs,
environment_variables=environment_variables,
variant=args.variant,
show_step_output=args.verbose,
)
def _test_flow_standard(args, pf_client, inputs, environment_variables):
"""Test flow with standard mode."""
result = pf_client.flows.test(
flow=args.flow,
inputs=inputs,
environment_variables=environment_variables,
variant=args.variant,
node=args.node,
allow_generator_output=False,
stream_output=False,
dump_test_result=True,
output_path=args.detail,
)
# Print flow/node test result
if isinstance(result, dict):
print(json.dumps(result, indent=4, ensure_ascii=False))
else:
print(result)
def _test_flow_experiment(args, pf_client, inputs, environment_variables):
"""Test flow with experiment specified."""
if args.variant or args.node:
error = ValueError("--variant or --node is not supported experiment is specified.")
raise UserErrorException(
target=ErrorTarget.CONTROL_PLANE_SDK,
message=str(error),
error=error,
)
node_results = pf_client.flows.test(
flow=args.flow,
inputs=inputs,
environment_variables=environment_variables,
experiment=args.experiment,
output_path=args.detail,
)
print(json.dumps(node_results, indent=4, ensure_ascii=False))
def serve_flow(args):
from promptflow._sdk._load_functions import load_flow
logger.info("Start serve model: %s", args.source)
# Set environment variable for local test
source = Path(args.source)
logger.info(
"Start promptflow server with port %s",
args.port,
)
os.environ["PROMPTFLOW_PROJECT_PATH"] = source.absolute().as_posix()
flow = load_flow(args.source)
if flow.language == FlowLanguage.CSharp:
serve_flow_csharp(args, source)
else:
serve_flow_python(args, source)
logger.info("Promptflow app ended")
def serve_flow_csharp(args, source):
from promptflow.batch._csharp_executor_proxy import EXECUTOR_SERVICE_DLL
try:
# Change working directory to model dir
logger.info(f"Change working directory to model dir {source}")
os.chdir(source)
command = [
"dotnet",
EXECUTOR_SERVICE_DLL,
"--port",
str(args.port),
"--yaml_path",
"flow.dag.yaml",
"--assembly_folder",
".",
"--connection_provider_url",
"",
"--log_path",
"",
"--serving",
]
subprocess.run(command, stdout=sys.stdout, stderr=sys.stderr)
except KeyboardInterrupt:
pass
def _resolve_python_flow_additional_includes(source) -> Path:
# Resolve flow additional includes
from promptflow import load_flow
flow = load_flow(source)
with FlowOperations._resolve_additional_includes(flow.path) as resolved_flow_path:
if resolved_flow_path == flow.path:
return source
# Copy resolved flow to temp folder if additional includes exists
# Note: DO NOT use the resolved flow path directly, as when the inner logic raises an exception,
# temp dir cleanup will fail because the files are still occupied by another process.
temp_flow_path = Path(tempfile.TemporaryDirectory().name)
shutil.copytree(src=resolved_flow_path.parent, dst=temp_flow_path, dirs_exist_ok=True)
return temp_flow_path
def serve_flow_python(args, source):
from promptflow._sdk._serving.app import create_app
static_folder = args.static_folder
if static_folder:
static_folder = Path(static_folder).absolute().as_posix()
config = list_of_dict_to_dict(args.config)
source = _resolve_python_flow_additional_includes(source)
os.environ["PROMPTFLOW_PROJECT_PATH"] = source.absolute().as_posix()
logger.info(f"Change working directory to model dir {source}")
os.chdir(source)
app = create_app(
static_folder=static_folder,
environment_variables=list_of_dict_to_dict(args.environment_variables),
config=config,
)
if not args.skip_open_browser:
target = f"http://{args.host}:{args.port}"
logger.info(f"Opening browser {target}...")
webbrowser.open(target)
# Debug is not supported for now, as debug will rerun the command and we have changed the working directory.
app.run(port=args.port, host=args.host)
def build_flow(args):
"""
i. `pf flow build --source <flow_folder> --output <output_folder> --variant <variant>`
ii. `pf flow build --source <flow_folder> --format docker --output <output_folder> --variant <variant>`
iii. `pf flow build --source <flow_folder> --format executable --output <output_folder> --variant <variant>`
# default to resolve variant and update flow.dag.yaml, support this in case customer want to keep the
variants for continuous development
# we can delay this before receiving specific customer request
v. `pf flow build --source <flow_folder> --output <output_folder> --keep-variants`
output structure:
flow/
.connections/
Dockerfile|executable.exe
...
"""
pf_client = PFClient()
pf_client.flows.build(
flow=args.source,
output=args.output,
format=args.format,
variant=args.variant,
flow_only=args.flow_only,
)
print(
f"Exported flow to {Path(args.output).absolute().as_posix()}.\n"
f"please check {Path(args.output).joinpath('README.md').absolute().as_posix()} "
f"for how to use it."
)
def validate_flow(args):
pf_client = PFClient()
validation_result = pf_client.flows.validate(
flow=args.source,
)
print(repr(validation_result))
if not validation_result.passed:
exit(1)
else:
exit(0)
| promptflow/src/promptflow/promptflow/_cli/_pf/_flow.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf/_flow.py",
"repo_id": "promptflow",
"token_count": 9285
} | 7 |
# Chat flow
Chat flow is designed for conversational application development, building upon the capabilities of standard flow and providing enhanced support for chat inputs/outputs and chat history management. With chat flow, you can easily create a chatbot that handles chat input and output.
## Create connection for LLM tool to use
You can follow these steps to create a connection required by a LLM tool.
Currently, there are two connection types supported by LLM tool: "AzureOpenAI" and "OpenAI". If you want to use "AzureOpenAI" connection type, you need to create an Azure OpenAI service first. Please refer to [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service/) for more details. If you want to use "OpenAI" connection type, you need to create an OpenAI account first. Please refer to [OpenAI](https://platform.openai.com/) for more details.
```bash
# Override keys with --set to avoid yaml file changes
# Create open ai connection
pf connection create --file openai.yaml --set api_key=<your_api_key> --name open_ai_connection
# Create azure open ai connection
# pf connection create --file azure_openai.yaml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`.
```bash
# show registered connection
pf connection show --name open_ai_connection
```
Please refer to connections [document](https://promptflow.azurewebsites.net/community/local/manage-connections.html) and [example](https://github.com/microsoft/promptflow/tree/main/examples/connections) for more details.
## Develop a chat flow
The most important elements that differentiate a chat flow from a standard flow are **Chat Input**, **Chat History**, and **Chat Output**.
- **Chat Input**: Chat input refers to the messages or queries submitted by users to the chatbot. Effectively handling chat input is crucial for a successful conversation, as it involves understanding user intentions, extracting relevant information, and triggering appropriate responses.
- **Chat History**: Chat history is the record of all interactions between the user and the chatbot, including both user inputs and AI-generated outputs. Maintaining chat history is essential for keeping track of the conversation context and ensuring the AI can generate contextually relevant responses. Chat History is a special type of chat flow input that stores chat messages in a structured format (see the illustrative structure after this list).
- **Chat Output**: Chat output refers to the AI-generated messages that are sent to the user in response to their inputs. Generating contextually appropriate and engaging chat outputs is vital for a positive user experience.
A chat flow can have multiple inputs, but Chat History and Chat Input are required inputs in chat flow.
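As an illustration, chat history is passed to the flow as a list of past turns, each recording the flow inputs and outputs of one interaction. A minimal sketch (the inner field names `question` and `answer` are placeholders that depend on your flow definition):
```python
# Illustrative chat_history value with one prior turn; "question"/"answer"
# are placeholder field names determined by your flow's inputs and outputs.
chat_history = [
    {
        "inputs": {"question": "What is prompt flow?"},
        "outputs": {"answer": "Prompt flow is a suite of tools for building LLM apps."},
    },
]
```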
## Interact with chat flow
Promptflow CLI provides a way to start an interactive chat session for chat flow. You can use the command below to start an interactive chat session:
```bash
pf flow test --flow <flow_folder> --interactive
```
After executing this command, you can interact with the chat flow in the terminal. Press **Enter** to send your message to the chat flow, and quit with **Ctrl+C**.
Promptflow CLI will distinguish the output of different roles by color, <span style="color:Green">User input</span>, <span style="color:Gold">Bot output</span>, <span style="color:Blue">Flow script output</span>, <span style="color:Cyan">Node output</span>.
> =========================================<br>
> Welcome to chat flow, <You-flow-name>.<br>
> Press Enter to send your message.<br>
> You can quit with ctrl+C.<br>
> =========================================<br>
> <span style="color:Green">User:</span> What types of container software there are<br>
> <span style="color:Gold">Bot:</span> There are several types of container software available, including:<br>
> 1. Docker: This is one of the most popular containerization software that allows developers to package their applications into containers and deploy them across different environments.<br>
> 2. Kubernetes: This is an open-source container orchestration platform that automates the deployment, scaling, and management of containerized applications.<br>
>
> <span style="color:Green">User:</span> What's the different between them<br>
> <span style="color:Gold">Bot:</span> The main difference between the various container software systems is their functionality and purpose. Here are some key differences between them:<br>
> 1. Docker is more focused on container packaging and deployment, while Kubernetes is more focused on container orchestration and management.<br>
> 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br>
If you add `--verbose` to the pf command, the output of each step will be displayed.
> =========================================<br>
> Welcome to chat flow, Template Chat Flow.<br>
> Press Enter to send your message.<br>
> You can quit with ctrl+C.<br>
> =========================================<br>
> <span style="color:Green">User:</span> What types of container software there are<br>
> <span style="color:Cyan">chat:</span> There are several types of container software available, including:<br>
> 1. Docker: A popular container platform that is widely used in the industry.<br>
> 2. Kubernetes: A container orchestration tool that helps manage and deploy containers at scale.<br>
>
> <span style="color:Gold">Bot:</span> There are several types of container software available, including:<br>
> 1. Docker: A popular container platform that is widely used in the industry.<br>
> 2. Kubernetes: A container orchestration tool that helps manage and deploy containers at scale.<br>
>
> <span style="color:Green">User:</span> What's the different between them<br>
> <span style="color:Cyan">chat:</span> The main differences between container software are in their architecture, feature sets, and use cases. Here are some brief explanations of the differences between the examples I listed:<br>
> 1. Docker: Docker is a container platform that is widely used for building, packaging, and deploying containerized applications. It is known for its ease of use, portability, and large ecosystem of tools and services.<br>
> 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br>
>
> <span style="color:Gold">Bot:</span> The main differences between container software are in their architecture, feature sets, and use cases. Here are some brief explanations of the differences between the examples I listed:<br>
> 1. Docker: Docker is a container platform that is widely used for building, packaging, and deploying containerized applications. It is known for its ease of use, portability, and large ecosystem of tools and services.<br>
> 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br>
| promptflow/src/promptflow/promptflow/_cli/data/chat_flow/flow_files/README.md/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/chat_flow/flow_files/README.md",
"repo_id": "promptflow",
"token_count": 1766
} | 8 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
class GeneratorProxy:
"""A proxy for generator that can record all items that have been yielded from the generator."""
def __init__(self, generator):
self._generator = generator
self._items = []
def __iter__(self):
return self
def __next__(self):
item = next(self._generator)
self._items.append(item)
return item
@property
def items(self):
return self._items
def generate_from_proxy(proxy: GeneratorProxy):
yield from proxy
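# Illustrative usage: the proxy records every item as it is consumed.
#   proxy = GeneratorProxy(i * i for i in range(3))
#   list(generate_from_proxy(proxy))  # -> [0, 1, 4]
#   proxy.items                       # -> [0, 1, 4]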
| promptflow/src/promptflow/promptflow/_core/generator_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/generator_proxy.py",
"repo_id": "promptflow",
"token_count": 215
} | 9 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow._sdk._constants import BULK_RUN_ERRORS
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
class SDKError(UserErrorException):
"""SDK base class, target default is CONTROL_PLANE_SDK."""
def __init__(
self,
message="",
message_format="",
target: ErrorTarget = ErrorTarget.CONTROL_PLANE_SDK,
module=None,
**kwargs,
):
super().__init__(message=message, message_format=message_format, target=target, module=module, **kwargs)
class SDKInternalError(SystemErrorException):
"""SDK internal error."""
def __init__(
self,
message="",
message_format="",
target: ErrorTarget = ErrorTarget.CONTROL_PLANE_SDK,
module=None,
**kwargs,
):
super().__init__(message=message, message_format=message_format, target=target, module=module, **kwargs)
class RunExistsError(SDKError):
"""Exception raised when run already exists."""
pass
class RunNotFoundError(SDKError):
"""Exception raised if run cannot be found."""
pass
class InvalidRunStatusError(SDKError):
"""Exception raised if run status is invalid."""
pass
class UnsecureConnectionError(SDKError):
"""Exception raised if connection is not secure."""
pass
class DecryptConnectionError(SDKError):
"""Exception raised if connection decryption failed."""
pass
class StoreConnectionEncryptionKeyError(SDKError):
"""Exception raised if no keyring backend."""
pass
class InvalidFlowError(SDKError):
"""Exception raised if flow definition is not legal."""
pass
class ConnectionNotFoundError(SDKError):
"""Exception raised if connection is not found."""
pass
class InvalidRunError(SDKError):
"""Exception raised if run name is not legal."""
pass
class GenerateFlowToolsJsonError(SDKError):
"""Exception raised if flow tools json generation failed."""
pass
class BulkRunException(SDKError):
"""Exception raised when bulk run failed."""
def __init__(self, *, message="", failed_lines, total_lines, errors, module: str = None, **kwargs):
self.failed_lines = failed_lines
self.total_lines = total_lines
self._additional_info = {
BULK_RUN_ERRORS: errors,
}
message = f"First error message is: {message}"
# bulk run error is line error only when failed_lines > 0
if isinstance(failed_lines, int) and isinstance(total_lines, int) and failed_lines > 0:
message = f"Failed to run {failed_lines}/{total_lines} lines. " + message
super().__init__(message=message, target=ErrorTarget.RUNTIME, module=module, **kwargs)
@property
def additional_info(self):
"""Set the tool exception details as additional info."""
return self._additional_info
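# Illustrative (hypothetical values): BulkRunException(message="KeyError('x')",
# failed_lines=2, total_lines=10, errors={...}) produces the message
# "Failed to run 2/10 lines. First error message is: KeyError('x')".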
class RunOperationParameterError(SDKError):
"""Exception raised when list run failed."""
pass
class RunOperationError(SDKError):
"""Exception raised when run operation failed."""
pass
class FlowOperationError(SDKError):
"""Exception raised when flow operation failed."""
pass
class ExperimentExistsError(SDKError):
"""Exception raised when experiment already exists."""
pass
class ExperimentNotFoundError(SDKError):
"""Exception raised if experiment cannot be found."""
pass
class MultipleExperimentTemplateError(SDKError):
"""Exception raised if multiple experiment template yaml found."""
pass
class NoExperimentTemplateError(SDKError):
"""Exception raised if no experiment template yaml found."""
pass
class ExperimentValidationError(SDKError):
"""Exception raised if experiment validation failed."""
pass
class ExperimentValueError(SDKError):
"""Exception raised if experiment validation failed."""
pass
class ExperimentHasCycle(SDKError):
"""Exception raised if experiment validation failed."""
pass
class DownloadInternalError(SDKInternalError):
"""Exception raised if download internal error."""
pass
class ExperimentNodeRunFailedError(SDKError):
"""Orchestrator raised if node run failed."""
pass
class ExperimentNodeRunNotFoundError(SDKError):
"""ExpNodeRun raised if node run cannot be found."""
pass
class ExperimentCommandRunError(SDKError):
"""Exception raised if experiment validation failed."""
pass
| promptflow/src/promptflow/promptflow/_sdk/_errors.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_errors.py",
"repo_id": "promptflow",
"token_count": 1507
} | 10 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
from flask_restx import Api, Namespace, Resource, fields # noqa: F401
__all__ = [
"Api",
"Namespace",
"Resource",
"fields",
]
| promptflow/src/promptflow/promptflow/_sdk/_service/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_service/__init__.py",
"repo_id": "promptflow",
"token_count": 117
} | 11 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow._sdk._serving.utils import get_cost_up_to_now
from promptflow._sdk._serving.monitor.metrics import ResponseType
class StreamingMonitor:
"""StreamingMonitor is used to collect metrics & data for streaming response."""
def __init__(
self,
logger,
flow_id: str,
start_time: float,
inputs: dict,
outputs: dict,
req_id: str,
streaming_field_name: str,
metric_recorder,
data_collector,
) -> None:
self.logger = logger
self.flow_id = flow_id
self.start_time = start_time
self.inputs = inputs
self.outputs = outputs
self.streaming_field_name = streaming_field_name
self.req_id = req_id
self.metric_recorder = metric_recorder
self.data_collector = data_collector
self.response_message = []
def on_stream_start(self):
"""stream start call back function, record flow latency when first byte received."""
self.logger.info("start streaming response...")
if self.metric_recorder:
duration = get_cost_up_to_now(self.start_time)
self.metric_recorder.record_flow_latency(self.flow_id, 200, True, ResponseType.FirstByte.value, duration)
def on_stream_end(self, streaming_resp_duration: float):
"""stream end call back function, record flow latency and streaming response data when last byte received."""
if self.metric_recorder:
duration = get_cost_up_to_now(self.start_time)
self.metric_recorder.record_flow_latency(self.flow_id, 200, True, ResponseType.LastByte.value, duration)
self.metric_recorder.record_flow_streaming_response_duration(self.flow_id, streaming_resp_duration)
if self.data_collector:
response_content = "".join(self.response_message)
if self.streaming_field_name in self.outputs:
self.outputs[self.streaming_field_name] = response_content
self.data_collector.collect_flow_data(self.inputs, self.outputs, self.req_id)
self.logger.info("finish streaming response.")
def on_stream_event(self, message: str):
"""stream event call back function, record streaming response data chunk."""
self.response_message.append(message)
| promptflow/src/promptflow/promptflow/_sdk/_serving/monitor/streaming_monitor.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/monitor/streaming_monitor.py",
"repo_id": "promptflow",
"token_count": 960
} | 12 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import collections
import datetime
import hashlib
import json
import multiprocessing
import os
import platform
import re
import shutil
import stat
import sys
import tempfile
import uuid
import zipfile
from contextlib import contextmanager
from enum import Enum
from functools import partial
from os import PathLike
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from urllib.parse import urlparse
import keyring
import pydash
from cryptography.fernet import Fernet
from filelock import FileLock
from jinja2 import Template
from keyring.errors import NoKeyringError
from marshmallow import ValidationError
import promptflow
from promptflow._constants import EXTENSION_UA, PF_NO_INTERACTIVE_LOGIN, PF_USER_AGENT, USER_AGENT
from promptflow._core.tool_meta_generator import generate_tool_meta_dict_by_file
from promptflow._core.tools_manager import gen_dynamic_list, retrieve_tool_func_result
from promptflow._sdk._constants import (
DAG_FILE_NAME,
DEFAULT_ENCODING,
FLOW_TOOLS_JSON,
FLOW_TOOLS_JSON_GEN_TIMEOUT,
HOME_PROMPT_FLOW_DIR,
KEYRING_ENCRYPTION_KEY_NAME,
KEYRING_ENCRYPTION_LOCK_PATH,
KEYRING_SYSTEM,
NODE,
NODE_VARIANTS,
NODES,
PROMPT_FLOW_DIR_NAME,
REFRESH_CONNECTIONS_DIR_LOCK_PATH,
REGISTRY_URI_PREFIX,
REMOTE_URI_PREFIX,
USE_VARIANTS,
VARIANTS,
CommonYamlFields,
ConnectionProvider,
)
from promptflow._sdk._errors import (
DecryptConnectionError,
GenerateFlowToolsJsonError,
StoreConnectionEncryptionKeyError,
UnsecureConnectionError,
)
from promptflow._sdk._vendor import IgnoreFile, get_ignore_file, get_upload_files_from_folder
from promptflow._utils.context_utils import _change_working_dir, inject_sys_path
from promptflow._utils.dataclass_serializer import serialize
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml, load_yaml_string
from promptflow.contracts.tool import ToolType
from promptflow.exceptions import ErrorTarget, UserErrorException
logger = get_cli_sdk_logger()
def snake_to_camel(name):
return re.sub(r"(?:^|_)([a-z])", lambda x: x.group(1).upper(), name)
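# Illustrative: snake_to_camel("azure_open_ai") -> "AzureOpenAi"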
def find_type_in_override(params_override: Optional[list] = None) -> Optional[str]:
params_override = params_override or []
for override in params_override:
if CommonYamlFields.TYPE in override:
return override[CommonYamlFields.TYPE]
return None
# region Encryption
CUSTOMIZED_ENCRYPTION_KEY_IN_KEY_RING = None
ENCRYPTION_KEY_IN_KEY_RING = None
@contextmanager
def use_customized_encryption_key(encryption_key: str):
global CUSTOMIZED_ENCRYPTION_KEY_IN_KEY_RING
CUSTOMIZED_ENCRYPTION_KEY_IN_KEY_RING = encryption_key
yield
CUSTOMIZED_ENCRYPTION_KEY_IN_KEY_RING = None
def set_encryption_key(encryption_key: Union[str, bytes]):
if isinstance(encryption_key, bytes):
encryption_key = encryption_key.decode("utf-8")
keyring.set_password("promptflow", "encryption_key", encryption_key)
_encryption_key_lock = FileLock(KEYRING_ENCRYPTION_LOCK_PATH)
def get_encryption_key(generate_if_not_found: bool = False) -> str:
global CUSTOMIZED_ENCRYPTION_KEY_IN_KEY_RING
global ENCRYPTION_KEY_IN_KEY_RING
if CUSTOMIZED_ENCRYPTION_KEY_IN_KEY_RING is not None:
return CUSTOMIZED_ENCRYPTION_KEY_IN_KEY_RING
if ENCRYPTION_KEY_IN_KEY_RING is not None:
return ENCRYPTION_KEY_IN_KEY_RING
def _get_from_keyring():
try:
# Cache encryption key as macOS will pop a window asking for permission when calling get_password
return keyring.get_password(KEYRING_SYSTEM, KEYRING_ENCRYPTION_KEY_NAME)
except NoKeyringError as e:
raise StoreConnectionEncryptionKeyError(
"System keyring backend service not found in your operating system. "
"See https://pypi.org/project/keyring/ to install requirement for different operating system, "
"or 'pip install keyrings.alt' to use the third-party backend. Reach more detail about this error at "
"https://microsoft.github.io/promptflow/how-to-guides/faq.html#connection-creation-failed-with-storeconnectionencryptionkeyerror" # noqa: E501
) from e
ENCRYPTION_KEY_IN_KEY_RING = _get_from_keyring()
if ENCRYPTION_KEY_IN_KEY_RING is not None or not generate_if_not_found:
return ENCRYPTION_KEY_IN_KEY_RING
_encryption_key_lock.acquire()
# Note: we access the keyring twice, as the global var can't be shared across processes.
ENCRYPTION_KEY_IN_KEY_RING = _get_from_keyring()
if ENCRYPTION_KEY_IN_KEY_RING is not None:
return ENCRYPTION_KEY_IN_KEY_RING
try:
ENCRYPTION_KEY_IN_KEY_RING = Fernet.generate_key().decode("utf-8")
keyring.set_password(KEYRING_SYSTEM, KEYRING_ENCRYPTION_KEY_NAME, ENCRYPTION_KEY_IN_KEY_RING)
finally:
_encryption_key_lock.release()
return ENCRYPTION_KEY_IN_KEY_RING
def encrypt_secret_value(secret_value):
encryption_key = get_encryption_key(generate_if_not_found=True)
fernet_client = Fernet(encryption_key)
token = fernet_client.encrypt(secret_value.encode("utf-8"))
return token.decode("utf-8")
def decrypt_secret_value(connection_name, encrypted_secret_value):
encryption_key = get_encryption_key()
if encryption_key is None:
raise Exception("Encryption key not found in keyring.")
fernet_client = Fernet(encryption_key)
try:
return fernet_client.decrypt(encrypted_secret_value.encode("utf-8")).decode("utf-8")
except Exception as e:
if len(encrypted_secret_value) < 57:
# This is to workaround old custom secrets that are not encrypted with Fernet.
# Fernet token: https://github.com/fernet/spec/blob/master/Spec.md
# Format: Version ‖ Timestamp ‖ IV ‖ Ciphertext ‖ HMAC
# Version: 8 bits, Timestamp: 64 bits, IV: 128 bits, HMAC: 256 bits,
# Ciphertext variable length, multiple of 128 bits
# So the minimum length of a Fernet token is 57 bytes
raise UnsecureConnectionError(
f"Please delete and re-create connection {connection_name} "
f"due to a security issue in the old sdk version."
)
raise DecryptConnectionError(
f"Decrypt connection {connection_name} secret failed: {str(e)}. "
f"If you have ever changed your encryption key manually, "
f"please revert it back to the original one, or delete all connections and re-create them."
)
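# Illustrative round trip (requires a working keyring backend; values are examples):
#   token = encrypt_secret_value("my-secret")     # Fernet token as text
#   decrypt_secret_value("my_connection", token)  # -> "my-secret"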
# endregion
def decorate_validation_error(schema: Any, pretty_error: str, additional_message: str = "") -> str:
return f"Validation for {schema.__name__} failed:\n\n {pretty_error} \n\n {additional_message}"
def load_from_dict(schema: Any, data: Dict, context: Dict, additional_message: str = "", **kwargs):
try:
return schema(context=context).load(data, **kwargs)
except ValidationError as e:
pretty_error = json.dumps(e.normalized_messages(), indent=2)
raise ValidationError(decorate_validation_error(schema, pretty_error, additional_message))
def strip_quotation(value):
"""
To avoid escaping chars in command args, args will be surrounded in quotation marks.
Need to remove the pair of quotation marks first.
"""
if value.startswith('"') and value.endswith('"'):
return value[1:-1]
elif value.startswith("'") and value.endswith("'"):
return value[1:-1]
else:
return value
def parse_variant(variant: str) -> Tuple[str, str]:
variant_regex = r"\${([^.]+).([^}]+)}"
match = re.match(variant_regex, strip_quotation(variant))
if match:
return match.group(1), match.group(2)
else:
error = ValueError(
f"Invalid variant format: {variant}, variant should be in format of ${{TUNING_NODE.VARIANT}}"
)
raise UserErrorException(
target=ErrorTarget.CONTROL_PLANE_SDK,
message=str(error),
error=error,
)
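# Illustrative: parse_variant("${summarize.variant_0}") -> ("summarize", "variant_0")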
def _match_reference(env_val: str):
env_val = env_val.strip()
m = re.match(r"^\$\{([^.]+)\.([^.]+)}$", env_val)
if not m:
return None, None
name, key = m.groups()
return name, key
# !!! Attention!!!: Please make sure you have contact with PRS team before changing the interface.
def get_used_connection_names_from_environment_variables():
"""The function will get all potential related connection names from current environment variables.
for example, if part of env var is
{
"ENV_VAR_1": "${my_connection.key}",
"ENV_VAR_2": "${my_connection.key2}",
"ENV_VAR_3": "${my_connection2.key}",
}
The function will return {"my_connection", "my_connection2"}.
"""
return get_used_connection_names_from_dict(os.environ)
def get_used_connection_names_from_dict(connection_dict: dict):
connection_names = set()
for key, val in connection_dict.items():
connection_name, _ = _match_reference(val)
if connection_name:
connection_names.add(connection_name)
return connection_names
# !!! Attention!!!: Please make sure you have contact with PRS team before changing the interface.
def update_environment_variables_with_connections(built_connections):
"""The function will result env var value ${my_connection.key} to the real connection keys."""
return update_dict_value_with_connections(built_connections, os.environ)
def _match_env_reference(val: str):
try:
val = val.strip()
m = re.match(r"^\$\{env:(.+)}$", val)
if not m:
return None
name = m.groups()[0]
return name
except Exception:
# for exceptions when val is not a string, return
return None
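# Illustrative: _match_env_reference("${env:OPENAI_API_KEY}") -> "OPENAI_API_KEY"
#               _match_env_reference("plain-value")           -> None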
def override_connection_config_with_environment_variable(connections: Dict[str, dict]):
"""
The function will use relevant environment variable to override connection configurations. For instance, if there
is a custom connection named 'custom_connection' with a configuration key called 'chat_deployment_name,' the
function will attempt to retrieve 'chat_deployment_name' from the environment variable
'CUSTOM_CONNECTION_CHAT_DEPLOYMENT_NAME' by default. If the environment variable is not set, it will use the
original value as a fallback.
"""
for connection_name, connection in connections.items():
values = connection.get("value", {})
for key, val in values.items():
connection_name = connection_name.replace(" ", "_")
env_name = f"{connection_name}_{key}".upper()
if env_name not in os.environ:
continue
values[key] = os.environ[env_name]
logger.info(f"Connection {connection_name}'s {key} is overridden with environment variable {env_name}")
return connections
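# Hypothetical example of the override described in the docstring: with
# os.environ["CUSTOM_CONNECTION_CHAT_DEPLOYMENT_NAME"] = "my-deployment",
#     {"custom_connection": {"value": {"chat_deployment_name": "default"}}}
# becomes
#     {"custom_connection": {"value": {"chat_deployment_name": "my-deployment"}}}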
def resolve_connections_environment_variable_reference(connections: Dict[str, dict]):
"""The function will resolve connection secrets env var reference like api_key: ${env:KEY}"""
for connection in connections.values():
values = connection.get("value", {})
for key, val in values.items():
            env_name = _match_env_reference(val)
            if not env_name:
                continue
if env_name not in os.environ:
raise UserErrorException(f"Environment variable {env_name} is not found.")
values[key] = os.environ[env_name]
return connections
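# Hypothetical example (env var and connection names are made up): with
# os.environ["OPENAI_KEY"] = "sk-xxx", the input
#     {"open_ai": {"value": {"api_key": "${env:OPENAI_KEY}"}}}
# resolves to
#     {"open_ai": {"value": {"api_key": "sk-xxx"}}}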
def update_dict_value_with_connections(built_connections, connection_dict: dict):
for key, val in connection_dict.items():
connection_name, connection_key = _match_reference(val)
if connection_name is None:
continue
if connection_name not in built_connections:
continue
if connection_key not in built_connections[connection_name]["value"]:
continue
connection_dict[key] = built_connections[connection_name]["value"][connection_key]
def in_jupyter_notebook() -> bool:
"""
Checks if user is using a Jupyter Notebook. This is necessary because logging is not allowed in
non-Jupyter contexts.
Adapted from https://stackoverflow.com/a/22424821
"""
try: # cspell:ignore ipython
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
return False
except ImportError:
return False
except AttributeError:
return False
return True
def render_jinja_template(template_path, *, trim_blocks=True, keep_trailing_newline=True, **kwargs):
with open(template_path, "r", encoding=DEFAULT_ENCODING) as f:
template = Template(f.read(), trim_blocks=trim_blocks, keep_trailing_newline=keep_trailing_newline)
return template.render(**kwargs)
def print_yellow_warning(message):
from colorama import Fore, init
init(autoreset=True)
print(Fore.YELLOW + message)
def print_red_error(message):
from colorama import Fore, init
init(autoreset=True)
print(Fore.RED + message)
def safe_parse_object_list(obj_list, parser, message_generator):
results = []
for obj in obj_list:
try:
results.append(parser(obj))
except Exception as e:
extended_message = f"{message_generator(obj)} Error: {type(e).__name__}, {str(e)}"
print_yellow_warning(extended_message)
return results
def _sanitize_python_variable_name(name: str):
from promptflow._utils.utils import _sanitize_python_variable_name
return _sanitize_python_variable_name(name)
def _get_additional_includes(yaml_path):
flow_dag = load_yaml(yaml_path)
return flow_dag.get("additional_includes", [])
def _is_folder_to_compress(path: Path) -> bool:
"""Check if the additional include needs to compress corresponding folder as a zip.
For example, given additional include /mnt/c/hello.zip
1) if a file named /mnt/c/hello.zip already exists, return False (simply copy)
2) if a folder named /mnt/c/hello exists, return True (compress as a zip and copy)
:param path: Given path in additional include.
:type path: Path
:return: If the path need to be compressed as a zip file.
:rtype: bool
"""
if path.suffix != ".zip":
return False
# if zip file exists, simply copy as other additional includes
if path.exists():
return False
# remove .zip suffix and check whether the folder exists
stem_path = path.parent / path.stem
return stem_path.is_dir()
def _resolve_folder_to_compress(base_path: Path, include: str, dst_path: Path) -> None:
"""resolve the zip additional include, need to compress corresponding folder."""
zip_additional_include = (base_path / include).resolve()
folder_to_zip = zip_additional_include.parent / zip_additional_include.stem
zip_file = dst_path / zip_additional_include.name
with zipfile.ZipFile(zip_file, "w") as zf:
zf.write(folder_to_zip, os.path.relpath(folder_to_zip, folder_to_zip.parent)) # write root in zip
for root, _, files in os.walk(folder_to_zip, followlinks=True):
for file in files:
                file_path = os.path.join(root, file)
                zf.write(file_path, os.path.relpath(file_path, folder_to_zip.parent))
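# Illustrative sketch (hypothetical layout): for an additional include "libs.zip"
# backed by a folder
#     libs/
#         util.py
#         sub/helper.py
# the generated zip stores paths relative to the folder's parent, i.e. entries
# "libs", "libs/util.py" and "libs/sub/helper.py", so the root folder is preserved.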
@contextmanager
def _merge_local_code_and_additional_includes(code_path: Path):
# TODO: unify variable names: flow_dir_path, flow_dag_path, flow_path
def additional_includes_copy(src, relative_path, target_dir):
if src.is_file():
dst = Path(target_dir) / relative_path
dst.parent.mkdir(parents=True, exist_ok=True)
if dst.exists():
logger.warning(
"Found duplicate file in additional includes, "
f"additional include file {src} will overwrite {relative_path}"
)
shutil.copy2(src, dst)
else:
for name in src.glob("*"):
additional_includes_copy(name, Path(relative_path) / name.name, target_dir)
if code_path.is_dir():
yaml_path = (Path(code_path) / DAG_FILE_NAME).resolve()
code_path = code_path.resolve()
else:
yaml_path = code_path.resolve()
code_path = code_path.parent.resolve()
with tempfile.TemporaryDirectory() as temp_dir:
shutil.copytree(code_path.resolve().as_posix(), temp_dir, dirs_exist_ok=True)
for item in _get_additional_includes(yaml_path):
src_path = Path(item)
if not src_path.is_absolute():
src_path = (code_path / item).resolve()
if _is_folder_to_compress(src_path):
_resolve_folder_to_compress(code_path, item, Path(temp_dir))
# early continue as the folder is compressed as a zip file
continue
if not src_path.exists():
error = ValueError(f"Unable to find additional include {item}")
raise UserErrorException(
target=ErrorTarget.CONTROL_PLANE_SDK,
message=str(error),
error=error,
)
additional_includes_copy(src_path, relative_path=src_path.name, target_dir=temp_dir)
yield temp_dir
def incremental_print(log: str, printed: int, fileout) -> int:
count = 0
for line in log.splitlines():
if count >= printed:
fileout.write(line + "\n")
printed += 1
count += 1
return printed
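# Illustrative sketch: incremental_print only writes lines beyond the `printed`
# watermark and returns the new watermark, so polling a growing log never
# re-prints earlier lines.
# import sys
# printed = 0
# printed = incremental_print("a\nb", printed, sys.stdout)     # prints a, b -> returns 2
# printed = incremental_print("a\nb\nc", printed, sys.stdout)  # prints only c -> returns 3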
def get_promptflow_sdk_version() -> str:
try:
return promptflow.__version__
except AttributeError:
# if promptflow is installed from source, it does not have __version__ attribute
return "0.0.1"
def print_pf_version():
print("promptflow\t\t\t {}".format(get_promptflow_sdk_version()))
print()
print("Executable '{}'".format(os.path.abspath(sys.executable)))
print("Python ({}) {}".format(platform.system(), sys.version))
class PromptflowIgnoreFile(IgnoreFile):
# TODO add more files to this list.
IGNORE_FILE = [".runs", "__pycache__"]
def __init__(self, prompt_flow_path: Union[Path, str]):
super(PromptflowIgnoreFile, self).__init__(prompt_flow_path)
self._path = Path(prompt_flow_path)
self._ignore_tools_json = False
@property
def base_path(self) -> Path:
return self._path
def _get_ignore_list(self):
"""Get ignore list from ignore file contents."""
if not self.exists():
return []
base_ignore = get_ignore_file(self.base_path)
result = self.IGNORE_FILE + base_ignore._get_ignore_list()
if self._ignore_tools_json:
result.append(f"{PROMPT_FLOW_DIR_NAME}/{FLOW_TOOLS_JSON}")
return result
def _generate_meta_from_files(
tools: List[Tuple[str, str]], flow_directory: Path, tools_dict: dict, exception_dict: dict
) -> None:
with _change_working_dir(flow_directory), inject_sys_path(flow_directory):
for source, tool_type in tools:
try:
tools_dict[source] = generate_tool_meta_dict_by_file(source, ToolType(tool_type))
except Exception as e:
exception_dict[source] = str(e)
def _generate_tool_meta(
flow_directory: Path,
tools: List[Tuple[str, str]],
raise_error: bool,
timeout: int,
*,
include_errors_in_output: bool = False,
load_in_subprocess: bool = True,
) -> Dict[str, dict]:
"""Generate tool meta from files.
:param flow_directory: flow directory
:param tools: tool list
:param raise_error: whether raise error when generate meta failed
:param timeout: timeout for generate meta
:param include_errors_in_output: whether include errors in output
    :param load_in_subprocess: whether to load tool meta in a subprocess to prevent system path disturbance. Default
        is True. If set to False, tool meta will be loaded in sync mode and timeout needs to be handled outside the
        current process.
:return: tool meta dict
"""
if load_in_subprocess:
        # use a separate process to generate meta, to avoid disturbing the system path
manager = multiprocessing.Manager()
tools_dict = manager.dict()
exception_dict = manager.dict()
p = multiprocessing.Process(
target=_generate_meta_from_files, args=(tools, flow_directory, tools_dict, exception_dict)
)
p.start()
p.join(timeout=timeout)
if p.is_alive():
logger.warning(f"Generate meta timeout after {timeout} seconds, terminate the process.")
p.terminate()
p.join()
else:
tools_dict, exception_dict = {}, {}
# There is no built-in method to forcefully stop a running thread/coroutine in Python
# because abruptly stopping a thread can cause issues like resource leaks,
# deadlocks, or inconsistent states.
# Caller needs to handle the timeout outside current process.
logger.warning(
"Generate meta in current process and timeout won't take effect. "
"Please handle timeout manually outside current process."
)
_generate_meta_from_files(tools, flow_directory, tools_dict, exception_dict)
res = {source: tool for source, tool in tools_dict.items()}
for source in res:
# remove name in tool meta
res[source].pop("name")
# convert string Enum to string
if isinstance(res[source]["type"], Enum):
res[source]["type"] = res[source]["type"].value
# not all tools have inputs, so check first
if "inputs" in res[source]:
for tool_input in res[source]["inputs"]:
tool_input_type = res[source]["inputs"][tool_input]["type"]
for i in range(len(tool_input_type)):
if isinstance(tool_input_type[i], Enum):
tool_input_type[i] = tool_input_type[i].value
# collect errors and print warnings
errors = {
source: exception for source, exception in exception_dict.items()
} # for not processed tools, regard as timeout error
for source, _ in tools:
if source not in res and source not in errors:
errors[source] = f"Generate meta timeout for source {source!r}."
for source in errors:
if include_errors_in_output:
res[source] = errors[source]
else:
logger.warning(f"Generate meta for source {source!r} failed: {errors[source]}.")
if raise_error and len(errors) > 0:
error_message = "Generate meta failed, detail error(s):\n" + json.dumps(errors, indent=4)
raise GenerateFlowToolsJsonError(error_message)
return res
def _retrieve_tool_func_result(func_call_scenario: str, function_config: Dict):
"""Retrieve tool func result according to func_call_scenario.
:param func_call_scenario: function call scenario
    :param function_config: function config in tool meta. Should contain 'func_path' and 'func_kwargs'.
:return: func call result according to func_call_scenario.
"""
func_path = function_config.get("func_path", "")
func_kwargs = function_config.get("func_kwargs", {})
    # The custom function may call the Azure control plane API to list Azure resources,
    # which may need the Azure workspace triad.
# TODO: move this method to a common place.
from promptflow._cli._utils import get_workspace_triad_from_local
workspace_triad = get_workspace_triad_from_local()
if workspace_triad.subscription_id and workspace_triad.resource_group_name and workspace_triad.workspace_name:
result = retrieve_tool_func_result(func_call_scenario, func_path, func_kwargs, workspace_triad._asdict())
# if no workspace triple available, just skip.
else:
result = retrieve_tool_func_result(func_call_scenario, func_path, func_kwargs)
result_with_log = {"result": result, "logs": {}}
return result_with_log
def _gen_dynamic_list(function_config: Dict) -> List:
"""Generate dynamic list for a tool input.
    :param function_config: function config in tool meta. Should contain 'func_path' and 'func_kwargs'.
:return: a list of tool input dynamic enums.
"""
func_path = function_config.get("func_path", "")
func_kwargs = function_config.get("func_kwargs", {})
    # The custom function may call the Azure control plane API to list Azure resources,
    # which may need the Azure workspace triad.
# TODO: move this method to a common place.
from promptflow._cli._utils import get_workspace_triad_from_local
workspace_triad = get_workspace_triad_from_local()
if workspace_triad.subscription_id and workspace_triad.resource_group_name and workspace_triad.workspace_name:
return gen_dynamic_list(func_path, func_kwargs, workspace_triad._asdict())
# if no workspace triple available, just skip.
else:
return gen_dynamic_list(func_path, func_kwargs)
def _generate_package_tools(keys: Optional[List[str]] = None) -> dict:
from promptflow._core.tools_manager import collect_package_tools
return collect_package_tools(keys=keys)
def _update_involved_tools_and_packages(
_node,
_node_path,
*,
tools: List,
used_packages: Set,
source_path_mapping: Dict[str, List[str]],
):
source, tool_type = pydash.get(_node, "source.path", None), _node.get("type", None)
used_packages.add(pydash.get(_node, "source.tool", None))
if source is None or tool_type is None:
return
# for custom LLM tool, its source points to the used prompt template so handle it as prompt tool
if tool_type == ToolType.CUSTOM_LLM:
tool_type = ToolType.PROMPT
if pydash.get(_node, "source.type") not in ["code", "package_with_prompt"]:
return
pair = (source, tool_type.lower())
if pair not in tools:
tools.append(pair)
source_path_mapping[source].append(f"{_node_path}.source.path")
def _get_involved_code_and_package(
data: dict,
) -> Tuple[List[Tuple[str, str]], Set[str], Dict[str, List[str]]]:
tools = [] # List[Tuple[source_file, tool_type]]
used_packages = set()
source_path_mapping = collections.defaultdict(list)
for node_i, node in enumerate(data[NODES]):
_update_involved_tools_and_packages(
node,
f"{NODES}.{node_i}",
tools=tools,
used_packages=used_packages,
source_path_mapping=source_path_mapping,
)
# understand DAG to parse variants
# TODO: should we allow source to appear both in node and node variants?
if node.get(USE_VARIANTS) is True:
node_variants = data[NODE_VARIANTS][node["name"]]
for variant_id in node_variants[VARIANTS]:
node_with_variant = node_variants[VARIANTS][variant_id][NODE]
_update_involved_tools_and_packages(
node_with_variant,
f"{NODE_VARIANTS}.{node['name']}.{VARIANTS}.{variant_id}.{NODE}",
tools=tools,
used_packages=used_packages,
source_path_mapping=source_path_mapping,
)
if None in used_packages:
used_packages.remove(None)
return tools, used_packages, source_path_mapping
def generate_flow_tools_json(
flow_directory: Union[str, Path],
dump: bool = True,
raise_error: bool = True,
timeout: int = FLOW_TOOLS_JSON_GEN_TIMEOUT,
*,
include_errors_in_output: bool = False,
target_source: str = None,
used_packages_only: bool = False,
source_path_mapping: Dict[str, List[str]] = None,
) -> dict:
"""Generate flow.tools.json for a flow directory.
:param flow_directory: path to flow directory.
:param dump: whether to dump to .promptflow/flow.tools.json, default value is True.
:param raise_error: whether to raise the error, default value is True.
:param timeout: timeout for generation, default value is 60 seconds.
:param include_errors_in_output: whether to include error messages in output, default value is False.
    :param target_source: the source name to filter result, default value is None. Note that tool meta will be loaded
        in the current process (updating the system path) if target_source is provided, given it's expected to come
        from a specific CLI call.
:param used_packages_only: whether to only include used packages, default value is False.
:param source_path_mapping: if specified, record yaml paths for each source.
"""
flow_directory = Path(flow_directory).resolve()
# parse flow DAG
data = load_yaml(flow_directory / DAG_FILE_NAME)
tools, used_packages, _source_path_mapping = _get_involved_code_and_package(data)
# update passed in source_path_mapping if specified
if source_path_mapping is not None:
source_path_mapping.update(_source_path_mapping)
# filter tools by target_source if specified
if target_source is not None:
tools = list(filter(lambda x: x[0] == target_source, tools))
# generate content
# TODO: remove type in tools (input) and code (output)
flow_tools = {
"code": _generate_tool_meta(
flow_directory,
tools,
raise_error=raise_error,
timeout=timeout,
include_errors_in_output=include_errors_in_output,
# we don't need to protect system path according to the target usage when target_source is specified
load_in_subprocess=target_source is None,
),
# specified source may only appear in code tools
"package": {}
if target_source is not None
else _generate_package_tools(keys=list(used_packages) if used_packages_only else None),
}
if dump:
# dump as flow.tools.json
promptflow_folder = flow_directory / PROMPT_FLOW_DIR_NAME
promptflow_folder.mkdir(exist_ok=True)
with open(promptflow_folder / FLOW_TOOLS_JSON, mode="w", encoding=DEFAULT_ENCODING) as f:
json.dump(flow_tools, f, indent=4)
return flow_tools
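# Hypothetical usage sketch (the path is made up): generate and dump flow.tools.json
# for a local flow folder, collecting errors instead of raising.
# flow_tools = generate_flow_tools_json("path/to/flow", dump=True, raise_error=False)
# flow_tools["code"] then maps each source file to its tool meta, and
# flow_tools["package"] maps package tool names to their metas.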
class ClientUserAgentUtil:
"""SDK/CLI side user agent utilities."""
@classmethod
def _get_context(cls):
from promptflow._core.operation_context import OperationContext
return OperationContext.get_instance()
@classmethod
def get_user_agent(cls):
from promptflow._core.operation_context import OperationContext
context = cls._get_context()
# directly get from context since client side won't need promptflow/xxx.
return context.get(OperationContext.USER_AGENT_KEY, "").strip()
@classmethod
def append_user_agent(cls, user_agent: Optional[str]):
if not user_agent:
return
context = cls._get_context()
context.append_user_agent(user_agent)
@classmethod
def update_user_agent_from_env_var(cls):
# this is for backward compatibility: we should use PF_USER_AGENT in newer versions.
for env_name in [USER_AGENT, PF_USER_AGENT]:
if env_name in os.environ:
cls.append_user_agent(os.environ[env_name])
@classmethod
def update_user_agent_from_config(cls):
"""Update user agent from config. 1p customer will set it. We'll add PFCustomer_ as prefix."""
from promptflow._sdk._configuration import Configuration
config = Configuration.get_instance()
user_agent = config.get_user_agent()
if user_agent:
cls.append_user_agent(user_agent)
def setup_user_agent_to_operation_context(user_agent):
"""Setup user agent to OperationContext.
For calls from extension, ua will be like: prompt-flow-extension/ promptflow-cli/ promptflow-sdk/
For calls from CLI, ua will be like: promptflow-cli/ promptflow-sdk/
For calls from SDK, ua will be like: promptflow-sdk/
For 1p customer call which set user agent in config, ua will be like: PFCustomer_XXX/
"""
# add user added UA after SDK/CLI
ClientUserAgentUtil.append_user_agent(user_agent)
ClientUserAgentUtil.update_user_agent_from_env_var()
ClientUserAgentUtil.update_user_agent_from_config()
return ClientUserAgentUtil.get_user_agent()
def call_from_extension() -> bool:
"""Return true if current request is from extension."""
ClientUserAgentUtil.update_user_agent_from_env_var()
user_agent = ClientUserAgentUtil.get_user_agent()
return EXTENSION_UA in user_agent
def generate_random_string(length: int = 6) -> str:
import random
import string
return "".join(random.choice(string.ascii_lowercase) for _ in range(length))
def copy_tree_respect_template_and_ignore_file(source: Path, target: Path, render_context: dict = None):
def is_template(path: str):
return path.endswith(".jinja2")
for source_path, target_path in get_upload_files_from_folder(
path=source,
ignore_file=PromptflowIgnoreFile(prompt_flow_path=source),
):
(target / target_path).parent.mkdir(parents=True, exist_ok=True)
if render_context is None or not is_template(source_path):
shutil.copy(source_path, target / target_path)
else:
(target / target_path[: -len(".jinja2")]).write_bytes(
# always use unix line ending
render_jinja_template(source_path, **render_context)
.encode("utf-8")
.replace(b"\r\n", b"\n"),
)
def get_local_connections_from_executable(
executable, client, connections_to_ignore: List[str] = None, connections_to_add: List[str] = None
):
"""Get local connections from executable.
executable: The executable flow object.
client: Local client to get connections.
connections_to_ignore: The connection names to ignore when getting connections.
connections_to_add: The connection names to add when getting connections.
"""
connection_names = executable.get_connection_names()
if connections_to_add:
connection_names.update(connections_to_add)
connections_to_ignore = connections_to_ignore or []
result = {}
for n in connection_names:
if n not in connections_to_ignore:
conn = client.connections.get(name=n, with_secrets=True)
result[n] = conn._to_execution_connection_dict()
return result
def _generate_connections_dir():
# Get Python executable path
python_path = sys.executable
# Hash the Python executable path
hash_object = hashlib.sha1(python_path.encode())
hex_dig = hash_object.hexdigest()
# Generate the connections system path using the hash
connections_dir = (HOME_PROMPT_FLOW_DIR / "envs" / hex_dig / "connections").resolve()
return connections_dir
_refresh_connection_dir_lock = FileLock(REFRESH_CONNECTIONS_DIR_LOCK_PATH)
# This function is used by extension to generate the connection files every time collect tools.
def refresh_connections_dir(connection_spec_files, connection_template_yamls):
connections_dir = _generate_connections_dir()
# Use lock to prevent concurrent access
with _refresh_connection_dir_lock:
if os.path.isdir(connections_dir):
shutil.rmtree(connections_dir)
os.makedirs(connections_dir)
if connection_spec_files and connection_template_yamls:
for connection_name, content in connection_spec_files.items():
file_name = connection_name + ".spec.json"
with open(connections_dir / file_name, "w", encoding=DEFAULT_ENCODING) as f:
json.dump(content, f, indent=2)
# use YAML to dump template file in order to keep the comments
for connection_name, content in connection_template_yamls.items():
yaml_data = load_yaml_string(content)
file_name = connection_name + ".template.yaml"
with open(connections_dir / file_name, "w", encoding=DEFAULT_ENCODING) as f:
dump_yaml(yaml_data, f)
def dump_flow_result(flow_folder, prefix, flow_result=None, node_result=None, custom_path=None):
"""Dump flow result for extension.
:param flow_folder: The flow folder.
:param prefix: The file prefix.
:param flow_result: The flow result returned by exec_line.
:param node_result: The node result when test node returned by load_and_exec_node.
:param custom_path: The custom path to dump flow result.
"""
if flow_result:
flow_serialize_result = {
"flow_runs": [serialize(flow_result.run_info)],
"node_runs": [serialize(run) for run in flow_result.node_run_infos.values()],
}
else:
flow_serialize_result = {
"flow_runs": [],
"node_runs": [serialize(node_result)],
}
dump_folder = Path(flow_folder) / PROMPT_FLOW_DIR_NAME if custom_path is None else Path(custom_path)
dump_folder.mkdir(parents=True, exist_ok=True)
with open(dump_folder / f"{prefix}.detail.json", "w", encoding=DEFAULT_ENCODING) as f:
json.dump(flow_serialize_result, f, indent=2, ensure_ascii=False)
if node_result:
metrics = flow_serialize_result["node_runs"][0]["metrics"]
output = flow_serialize_result["node_runs"][0]["output"]
else:
metrics = flow_serialize_result["flow_runs"][0]["metrics"]
output = flow_serialize_result["flow_runs"][0]["output"]
if metrics:
with open(dump_folder / f"{prefix}.metrics.json", "w", encoding=DEFAULT_ENCODING) as f:
json.dump(metrics, f, indent=2, ensure_ascii=False)
if output:
with open(dump_folder / f"{prefix}.output.json", "w", encoding=DEFAULT_ENCODING) as f:
json.dump(output, f, indent=2, ensure_ascii=False)
def read_write_by_user():
return stat.S_IRUSR | stat.S_IWUSR
def remove_empty_element_from_dict(obj: dict) -> dict:
"""Remove empty element from dict, e.g. {"a": 1, "b": {}} -> {"a": 1}"""
new_dict = {}
for key, value in obj.items():
if isinstance(value, dict):
value = remove_empty_element_from_dict(value)
        # drop None values and (possibly recursively emptied) empty dicts, per the docstring example
        if value is not None and value != {}:
new_dict[key] = value
return new_dict
def is_github_codespaces():
# Ref:
# https://docs.github.com/en/codespaces/developing-in-a-codespace/default-environment-variables-for-your-codespace
return os.environ.get("CODESPACES", None) == "true"
def interactive_credential_disabled():
return os.environ.get(PF_NO_INTERACTIVE_LOGIN, "false").lower() == "true"
def is_from_cli():
from promptflow._cli._user_agent import USER_AGENT as CLI_UA
return CLI_UA in ClientUserAgentUtil.get_user_agent()
def is_url(value: Union[PathLike, str]) -> bool:
try:
result = urlparse(str(value))
return all([result.scheme, result.netloc])
except ValueError:
return False
def is_remote_uri(obj) -> bool:
# return True if it's supported remote uri
if isinstance(obj, str):
if obj.startswith(REMOTE_URI_PREFIX):
            # starts with "azureml:", e.g. azureml:name:version, azureml://xxx
return True
elif is_url(obj):
return True
return False
def parse_remote_flow_pattern(flow: object) -> str:
# Check if the input matches the correct pattern
flow_name = None
error_message = (
f"Invalid remote flow pattern, got {flow!r} while expecting "
f"a remote workspace flow like '{REMOTE_URI_PREFIX}<flow-name>', or a remote registry flow like "
f"'{REMOTE_URI_PREFIX}//registries/<registry-name>/models/<flow-name>/versions/<version>'"
)
if not isinstance(flow, str) or not flow.startswith(REMOTE_URI_PREFIX):
raise UserErrorException(error_message)
# check for registry flow pattern
if flow.startswith(REGISTRY_URI_PREFIX):
pattern = r"azureml://registries/.*?/models/(?P<name>.*?)/versions/(?P<version>.*?)$"
match = re.match(pattern, flow)
if not match or len(match.groups()) != 2:
raise UserErrorException(error_message)
flow_name, _ = match.groups()
# check for workspace flow pattern
elif flow.startswith(REMOTE_URI_PREFIX):
pattern = r"azureml:(?P<name>.*?)$"
match = re.match(pattern, flow)
if not match or len(match.groups()) != 1:
raise UserErrorException(error_message)
flow_name = match.groups()[0]
return flow_name
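# Illustrative sketch (flow and registry names are hypothetical):
# >>> parse_remote_flow_pattern("azureml:my_flow")
# 'my_flow'
# >>> parse_remote_flow_pattern("azureml://registries/my-registry/models/my_flow/versions/1")
# 'my_flow'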
def get_connection_operation(connection_provider: str, credential=None, user_agent: str = None):
"""
Get connection operation based on connection provider.
This function will be called by PFClient, so please do not refer to PFClient in this function.
:param connection_provider: Connection provider, e.g. local, azureml, azureml://subscriptions..., etc.
:type connection_provider: str
:param credential: Credential when remote provider, default to chained credential DefaultAzureCredential.
:type credential: object
:param user_agent: User Agent
:type user_agent: str
"""
if connection_provider == ConnectionProvider.LOCAL.value:
from promptflow._sdk.operations._connection_operations import ConnectionOperations
logger.debug("PFClient using local connection operations.")
connection_operation = ConnectionOperations()
elif connection_provider.startswith(ConnectionProvider.AZUREML.value):
from promptflow._sdk.operations._local_azure_connection_operations import LocalAzureConnectionOperations
logger.debug(f"PFClient using local azure connection operations with credential {credential}.")
if user_agent is None:
connection_operation = LocalAzureConnectionOperations(connection_provider, credential=credential)
else:
connection_operation = LocalAzureConnectionOperations(connection_provider, user_agent=user_agent)
else:
error = ValueError(f"Unsupported connection provider: {connection_provider}")
raise UserErrorException(
target=ErrorTarget.CONTROL_PLANE_SDK,
message=str(error),
error=error,
)
return connection_operation
# extract open read/write as partial to centralize the encoding
read_open = partial(open, mode="r", encoding=DEFAULT_ENCODING)
write_open = partial(open, mode="w", encoding=DEFAULT_ENCODING)
# nan, inf and -inf are not JSON serializable according to https://docs.python.org/3/library/json.html#json.loads
# `parse_constant` will be called to handle these values
# similar idea for below `json_load` and its parameter `parse_const_as_str`
json_loads_parse_const_as_str = partial(json.loads, parse_constant=lambda x: str(x))
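# Illustrative sketch: NaN/Infinity/-Infinity are accepted by Python's json module
# although they are not valid JSON; parse_constant turns them into strings instead.
# >>> json_loads_parse_const_as_str('{"v": NaN}')
# {'v': 'NaN'}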
# extract some file operations inside this file
def json_load(file, parse_const_as_str: bool = False):
with read_open(file) as f:
if parse_const_as_str is True:
return json.load(f, parse_constant=lambda x: str(x))
else:
return json.load(f)
def json_dump(obj, file) -> None:
with write_open(file) as f:
json.dump(obj, f, ensure_ascii=False)
def pd_read_json(file) -> "DataFrame":
import pandas as pd
with read_open(file) as f:
return pd.read_json(f, orient="records", lines=True)
def get_mac_address() -> Union[str, None]:
"""Get the MAC ID of the first network card."""
try:
import psutil
mac_address = None
net_address = psutil.net_if_addrs()
eth = []
        # Query the network cards in order and obtain the MAC address of the first one found.
        # "Ethernet" is the common name of the Windows network card.
        # "eth", "ens", "eno" are common names of Linux & macOS network cards.
        net_interface_names = ["Ethernet", "eth0", "eth1", "ens0", "ens1", "eno0", "eno1"]
for net_interface_name in net_interface_names:
if net_interface_name in net_address:
eth = net_address[net_interface_name]
break
for net_interface in eth:
if net_interface.family == psutil.AF_LINK: # mac address
mac_address = str(net_interface.address)
break
        # If obtaining the network card MAC address fails, fall back to uuid.getnode()
if mac_address is None:
node = uuid.getnode()
if node != 0:
mac_address = str(uuid.UUID(int=node).hex[-12:])
return mac_address
except Exception as e:
logger.debug(f"get mac id error: {str(e)}")
return None
def get_system_info() -> Tuple[str, str, str]:
"""Get the host name, system, and machine."""
try:
import platform
return platform.node(), platform.system(), platform.machine()
except Exception as e:
logger.debug(f"get host name error: {str(e)}")
return "", "", ""
def gen_uuid_by_compute_info() -> Union[str, None]:
mac_address = get_mac_address()
host_name, system, machine = get_system_info()
if mac_address:
# Use sha256 convert host_name+system+machine to a fixed length string
# and concatenate it after the mac address to ensure that the concatenated string is unique.
system_info_hash = hashlib.sha256((host_name + system + machine).encode()).hexdigest()
compute_info_hash = hashlib.sha256((mac_address + system_info_hash).encode()).hexdigest()
return str(uuid.uuid5(uuid.NAMESPACE_OID, compute_info_hash))
return None
def convert_time_unix_nano_to_timestamp(time_unix_nano: str) -> str:
nanoseconds = int(time_unix_nano)
seconds = nanoseconds / 1_000_000_000
timestamp = datetime.datetime.utcfromtimestamp(seconds)
return timestamp.isoformat()
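# Illustrative sketch: nanoseconds since the Unix epoch are converted to an
# ISO 8601 UTC timestamp.
# >>> convert_time_unix_nano_to_timestamp("1700000000000000000")
# '2023-11-14T22:13:20'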
def parse_kv_from_pb_attribute(attribute: Dict) -> Tuple[str, str]:
attr_key = attribute["key"]
# suppose all values are flattened here
# so simply regard the first value as the attribute value
attr_value = list(attribute["value"].values())[0]
return attr_key, attr_value
def flatten_pb_attributes(attributes: List[Dict]) -> Dict:
flattened_attributes = {}
for attribute in attributes:
attr_key, attr_value = parse_kv_from_pb_attribute(attribute)
flattened_attributes[attr_key] = attr_value
return flattened_attributes
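# Illustrative sketch: protobuf attributes arrive as a list of {key, value} pairs
# where value is a single-entry dict such as {"stringValue": "..."}; flattening
# yields a plain mapping.
# >>> flatten_pb_attributes([{"key": "span_type", "value": {"stringValue": "LLM"}}])
# {'span_type': 'LLM'}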
def parse_otel_span_status_code(value: int) -> str:
# map int value to string
# https://github.com/open-telemetry/opentelemetry-specification/blob/v1.22.0/specification/trace/api.md#set-status
# https://github.com/open-telemetry/opentelemetry-python/blob/v1.22.0/opentelemetry-api/src/opentelemetry/trace/status.py#L22-L32
if value == 0:
return "Unset"
elif value == 1:
return "Ok"
else:
return "Error"
| promptflow/src/promptflow/promptflow/_sdk/_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_utils.py",
"repo_id": "promptflow",
"token_count": 18463
} | 13 |
The exported entry file and its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files
- connections: the folder contains yaml files to create all related connections
- app.py: the entry file, included as the entry point of the bundled application.
- app.spec: the spec file that tells PyInstaller how to process your script.
- main.py: starts the Streamlit service; called by the entry file.
- settings.json: a json file to store the settings of the executable application.
- build: a folder that contains various log and working files.
- dist: a folder that contains the executable application.
- README.md: the readme file describing how to use the exported files and scripts.
Please refer to [official doc](https://microsoft.github.io/promptflow/how-to-guides/deploy-a-flow/index.html)
for more details about how to use the exported files and scripts.
| promptflow/src/promptflow/promptflow/_sdk/data/executable/README.md/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/executable/README.md",
"repo_id": "promptflow",
"token_count": 223
} | 14 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import datetime
import json
import typing
from dataclasses import dataclass
from google.protobuf.json_format import MessageToJson
from opentelemetry.proto.trace.v1.trace_pb2 import Span as PBSpan
from promptflow._constants import (
DEFAULT_SPAN_TYPE,
ResourceAttributeFieldName,
SpanAttributeFieldName,
SpanContextFieldName,
SpanFieldName,
SpanResourceFieldName,
SpanStatusFieldName,
)
from promptflow._sdk._orm.trace import Span as ORMSpan
from promptflow._sdk._utils import (
convert_time_unix_nano_to_timestamp,
flatten_pb_attributes,
parse_otel_span_status_code,
)
class Span:
"""Span is exactly the same as OpenTelemetry Span."""
def __init__(
self,
name: str,
context: typing.Dict[str, str],
kind: str,
start_time: str,
end_time: str,
status: str,
attributes: typing.Dict[str, str],
resource: typing.Dict,
# should come from attributes
span_type: str,
session_id: str,
# optional fields
parent_span_id: typing.Optional[str] = None,
events: typing.Optional[typing.List] = None,
links: typing.Optional[typing.List] = None,
# prompt flow concepts
path: typing.Optional[str] = None,
run: typing.Optional[str] = None,
experiment: typing.Optional[str] = None,
):
self.name = name
self.span_id = context[SpanContextFieldName.SPAN_ID]
self.trace_id = context[SpanContextFieldName.TRACE_ID]
self.span_type = span_type
self.parent_span_id = parent_span_id
self.session_id = session_id
self.path = path
self.run = run
self.experiment = experiment
self._content = {
SpanFieldName.NAME: self.name,
SpanFieldName.CONTEXT: copy.deepcopy(context),
SpanFieldName.KIND: kind,
SpanFieldName.PARENT_ID: self.parent_span_id,
SpanFieldName.START_TIME: start_time,
SpanFieldName.END_TIME: end_time,
SpanFieldName.STATUS: status,
SpanFieldName.ATTRIBUTES: copy.deepcopy(attributes),
SpanFieldName.EVENTS: copy.deepcopy(events),
SpanFieldName.LINKS: copy.deepcopy(links),
SpanFieldName.RESOURCE: copy.deepcopy(resource),
}
def _persist(self) -> None:
self._to_orm_object().persist()
@staticmethod
def _from_orm_object(obj: ORMSpan) -> "Span":
content = json.loads(obj.content)
return Span(
name=obj.name,
context=content[SpanFieldName.CONTEXT],
kind=content[SpanFieldName.KIND],
start_time=content[SpanFieldName.START_TIME],
end_time=content[SpanFieldName.END_TIME],
status=content[SpanFieldName.STATUS],
attributes=content[SpanFieldName.ATTRIBUTES],
resource=content[SpanFieldName.RESOURCE],
span_type=obj.span_type,
session_id=obj.session_id,
parent_span_id=obj.parent_span_id,
events=content[SpanFieldName.EVENTS],
links=content[SpanFieldName.LINKS],
path=obj.path,
run=obj.run,
experiment=obj.experiment,
)
def _to_orm_object(self) -> ORMSpan:
return ORMSpan(
name=self.name,
trace_id=self.trace_id,
span_id=self.span_id,
parent_span_id=self.parent_span_id,
span_type=self.span_type,
session_id=self.session_id,
content=json.dumps(self._content),
path=self.path,
run=self.run,
experiment=self.experiment,
)
@staticmethod
def _from_protobuf_object(obj: PBSpan, resource: typing.Dict) -> "Span":
span_dict = json.loads(MessageToJson(obj))
span_id = obj.span_id.hex()
trace_id = obj.trace_id.hex()
context = {
SpanContextFieldName.TRACE_ID: trace_id,
SpanContextFieldName.SPAN_ID: span_id,
SpanContextFieldName.TRACE_STATE: obj.trace_state,
}
parent_span_id = obj.parent_span_id.hex()
start_time = convert_time_unix_nano_to_timestamp(obj.start_time_unix_nano)
end_time = convert_time_unix_nano_to_timestamp(obj.end_time_unix_nano)
status = {
SpanStatusFieldName.STATUS_CODE: parse_otel_span_status_code(obj.status.code),
}
attributes = flatten_pb_attributes(span_dict[SpanFieldName.ATTRIBUTES])
        # `span_type` is not a standard field in OpenTelemetry attributes
        # (for example, LangChain instrumentation does not inject it),
        # so we need to get it with a default value to avoid KeyError
span_type = attributes.get(SpanAttributeFieldName.SPAN_TYPE, DEFAULT_SPAN_TYPE)
# parse from resource.attributes: session id, experiment
resource_attributes = resource[SpanResourceFieldName.ATTRIBUTES]
session_id = resource_attributes[ResourceAttributeFieldName.SESSION_ID]
experiment = resource_attributes.get(ResourceAttributeFieldName.EXPERIMENT_NAME, None)
return Span(
name=obj.name,
context=context,
kind=obj.kind,
start_time=start_time,
end_time=end_time,
status=status,
attributes=attributes,
resource=resource,
span_type=span_type,
session_id=session_id,
parent_span_id=parent_span_id,
experiment=experiment,
)
@dataclass
class _LineRunData:
"""Basic data structure for line run, no matter if it is a main or evaluation."""
line_run_id: str
trace_id: str
root_span_id: str
inputs: typing.Dict
outputs: typing.Dict
start_time: str
end_time: str
status: str
latency: float
name: str
kind: str
cumulative_token_count: typing.Optional[typing.Dict[str, int]]
def _from_root_span(span: Span) -> "_LineRunData":
attributes: dict = span._content[SpanFieldName.ATTRIBUTES]
if SpanAttributeFieldName.LINE_RUN_ID in attributes:
line_run_id = attributes[SpanAttributeFieldName.LINE_RUN_ID]
elif SpanAttributeFieldName.REFERENCED_LINE_RUN_ID in attributes:
line_run_id = attributes[SpanAttributeFieldName.REFERENCED_LINE_RUN_ID]
else:
# eager flow/arbitrary script
line_run_id = span.trace_id
start_time = datetime.datetime.fromisoformat(span._content[SpanFieldName.START_TIME])
end_time = datetime.datetime.fromisoformat(span._content[SpanFieldName.END_TIME])
# calculate `cumulative_token_count`
completion_token_count = int(attributes.get(SpanAttributeFieldName.COMPLETION_TOKEN_COUNT, 0))
prompt_token_count = int(attributes.get(SpanAttributeFieldName.PROMPT_TOKEN_COUNT, 0))
total_token_count = int(attributes.get(SpanAttributeFieldName.TOTAL_TOKEN_COUNT, 0))
# if there is no token usage, set `cumulative_token_count` to None
if total_token_count > 0:
cumulative_token_count = {
"completion": completion_token_count,
"prompt": prompt_token_count,
"total": total_token_count,
}
else:
cumulative_token_count = None
return _LineRunData(
line_run_id=line_run_id,
trace_id=span.trace_id,
root_span_id=span.span_id,
# for standard OpenTelemetry traces, there won't be `inputs` and `outputs` in attributes
inputs=json.loads(attributes.get(SpanAttributeFieldName.INPUTS, "{}")),
outputs=json.loads(attributes.get(SpanAttributeFieldName.OUTPUT, "{}")),
start_time=start_time.isoformat(),
end_time=end_time.isoformat(),
status=span._content[SpanFieldName.STATUS][SpanStatusFieldName.STATUS_CODE],
latency=(end_time - start_time).total_seconds(),
name=span.name,
kind=attributes.get(SpanAttributeFieldName.SPAN_TYPE, span.span_type),
cumulative_token_count=cumulative_token_count,
)
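# Illustrative note (values are hypothetical): cumulative_token_count is only set
# when the root span carries a positive total token count, e.g.
#     {"completion": 20, "prompt": 100, "total": 120}
# otherwise it is None, signaling that no token usage was recorded for the line.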
@dataclass
class LineRun:
"""Line run is an abstraction of spans related to prompt flow."""
line_run_id: str
trace_id: str
root_span_id: str
inputs: typing.Dict
outputs: typing.Dict
start_time: str
end_time: str
status: str
latency: float
name: str
kind: str
cumulative_token_count: typing.Optional[typing.Dict[str, int]] = None
evaluations: typing.Optional[typing.List[typing.Dict]] = None
@staticmethod
def _from_spans(spans: typing.List[Span]) -> typing.Optional["LineRun"]:
main_line_run_data: _LineRunData = None
evaluation_line_run_data_dict = dict()
for span in spans:
if span.parent_span_id:
continue
attributes = span._content[SpanFieldName.ATTRIBUTES]
if SpanAttributeFieldName.REFERENCED_LINE_RUN_ID in attributes:
evaluation_line_run_data_dict[span.name] = _LineRunData._from_root_span(span)
elif SpanAttributeFieldName.LINE_RUN_ID in attributes:
main_line_run_data = _LineRunData._from_root_span(span)
else:
# eager flow/arbitrary script
main_line_run_data = _LineRunData._from_root_span(span)
# main line run span is absent, ignore this line run
# this may happen when the line is still executing, or terminated;
        # or the line run is killed before the traces are exported
if main_line_run_data is None:
return None
evaluations = dict()
for eval_name, eval_line_run_data in evaluation_line_run_data_dict.items():
evaluations[eval_name] = eval_line_run_data
return LineRun(
line_run_id=main_line_run_data.line_run_id,
trace_id=main_line_run_data.trace_id,
root_span_id=main_line_run_data.root_span_id,
inputs=main_line_run_data.inputs,
outputs=main_line_run_data.outputs,
start_time=main_line_run_data.start_time,
end_time=main_line_run_data.end_time,
status=main_line_run_data.status,
latency=main_line_run_data.latency,
name=main_line_run_data.name,
kind=main_line_run_data.kind,
cumulative_token_count=main_line_run_data.cumulative_token_count,
evaluations=evaluations,
)
| promptflow/src/promptflow/promptflow/_sdk/entities/_trace.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_trace.py",
"repo_id": "promptflow",
"token_count": 4866
} | 15 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument,no-self-use
import copy
from pathlib import Path
from typing import Optional
from marshmallow import RAISE, fields, post_load, pre_load
from marshmallow.decorators import post_dump
from marshmallow.schema import Schema, SchemaMeta
from pydash import objects
from promptflow._sdk._constants import BASE_PATH_CONTEXT_KEY, FILE_PREFIX, PARAMS_OVERRIDE_KEY
from promptflow._utils.logger_utils import LoggerFactory
from promptflow._utils.yaml_utils import load_yaml
module_logger = LoggerFactory.get_logger(__name__)
class PatchedMeta:
ordered = True
unknown = RAISE
class PatchedBaseSchema(Schema):
class Meta:
unknown = RAISE
ordered = True
@post_dump
def remove_none(self, data, **kwargs):
"""Prevents from dumping attributes that are None, thus making the dump more compact."""
return dict((key, value) for key, value in data.items() if value is not None)
class PatchedSchemaMeta(SchemaMeta):
"""Currently there is an open issue in marshmallow, that the "unknown" property is not inherited.
We use a metaclass to inject a Meta class into all our Schema classes.
"""
def __new__(cls, name, bases, dct):
meta = dct.get("Meta")
if meta is None:
dct["Meta"] = PatchedMeta
else:
if not hasattr(meta, "unknown"):
dct["Meta"].unknown = RAISE
if not hasattr(meta, "ordered"):
dct["Meta"].ordered = True
if PatchedBaseSchema not in bases:
bases = bases + (PatchedBaseSchema,)
return super().__new__(cls, name, bases, dct)
class PathAwareSchema(PatchedBaseSchema, metaclass=PatchedSchemaMeta):
schema_ignored = fields.Str(data_key="$schema", dump_only=True)
def __init__(self, *args, **kwargs):
# this will make context of all PathAwareSchema child class point to one object
self.context = kwargs.get("context", None)
if self.context is None or self.context.get(BASE_PATH_CONTEXT_KEY, None) is None:
raise Exception("Base path for reading files is required when building PathAwareSchema")
# set old base path, note it's an Path object and point to the same object with
# self.context.get(BASE_PATH_CONTEXT_KEY)
self.old_base_path = self.context.get(BASE_PATH_CONTEXT_KEY)
super().__init__(*args, **kwargs)
@pre_load
def add_param_overrides(self, data, **kwargs):
# Removing params override from context so that overriding is done once on the yaml
# child schema should not override the params.
params_override = self.context.pop(PARAMS_OVERRIDE_KEY, None)
if params_override is not None:
for override in params_override:
for param, val in override.items():
# Check that none of the intermediary levels are string references (azureml/file)
param_tokens = param.split(".")
test_layer = data
for layer in param_tokens:
if test_layer is None:
continue
if isinstance(test_layer, str):
raise Exception(
f"Cannot use '--set' on properties defined by reference strings: --set {param}"
)
test_layer = test_layer.get(layer, None)
objects.set_(data, param, val)
return data
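    # Illustrative sketch (hypothetical override): a params_override entry like
    #     [{"environment.python_requirements_txt": "requirements.txt"}]
    # walks the dotted path and sets data["environment"]["python_requirements_txt"],
    # raising if any intermediate level is a string reference such as "file:...".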
@pre_load
def trim_dump_only(self, data, **kwargs):
"""Marshmallow raises if dump_only fields are present in the schema. This is not desirable for our use case,
where read-only properties can be present in the yaml, and should simply be ignored, while we should raise in.
the case an unknown field is present - to prevent typos.
"""
if isinstance(data, str) or data is None:
return data
for key, value in self.fields.items(): # pylint: disable=no-member
if value.dump_only:
schema_key = value.data_key or key
if data.get(schema_key, None) is not None:
data.pop(schema_key)
return data
class YamlFileSchema(PathAwareSchema):
"""Base class that allows derived classes to be built from paths to separate yaml files in place of inline yaml
definitions.
    This will be transparent to any parent schema containing a nested schema of the derived class; it will not need a
    union type for the schema, as a YamlFile string will be resolved by the pre_load method into a dictionary. On
    loading the child yaml, the base path is updated to use for loading sub-child files.
"""
def __init__(self, *args, **kwargs):
self._previous_base_path = None
super().__init__(*args, **kwargs)
@classmethod
def _resolve_path(cls, data, base_path) -> Optional[Path]:
if isinstance(data, str) and data.startswith(FILE_PREFIX):
# Use directly if absolute path
path = Path(data[len(FILE_PREFIX) :])
if not path.is_absolute():
path = Path(base_path) / path
                path = path.resolve()
return path
return None
@pre_load
def load_from_file(self, data, **kwargs):
path = self._resolve_path(data, Path(self.context[BASE_PATH_CONTEXT_KEY]))
if path is not None:
self._previous_base_path = Path(self.context[BASE_PATH_CONTEXT_KEY])
# Push update
# deepcopy self.context[BASE_PATH_CONTEXT_KEY] to update old base path
self.old_base_path = copy.deepcopy(self.context[BASE_PATH_CONTEXT_KEY])
self.context[BASE_PATH_CONTEXT_KEY] = path.parent
data = load_yaml(path)
return data
return data
# Schemas are read depth-first, so push/pop to update current path
@post_load
def reset_base_path_post_load(self, data, **kwargs):
if self._previous_base_path is not None:
# pop state
self.context[BASE_PATH_CONTEXT_KEY] = self._previous_base_path
return data
class CreateBySchema(metaclass=PatchedSchemaMeta):
user_object_id = fields.Str(dump_only=True, attribute="userObjectId")
user_tenant_id = fields.Str(dump_only=True, attribute="userTenantId")
user_name = fields.Str(dump_only=True, attribute="userName")
| promptflow/src/promptflow/promptflow/_sdk/schemas/_base.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/schemas/_base.py",
"repo_id": "promptflow",
"token_count": 2701
} | 16 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing import AbstractSet, Any, Dict, List, Mapping
from promptflow._utils.logger_utils import logger
from promptflow.contracts.flow import Flow, FlowInputDefinition, InputValueType
from promptflow.contracts.run_info import FlowRunInfo, Status
def apply_default_value_for_input(inputs: Dict[str, FlowInputDefinition], line_inputs: Mapping) -> Dict[str, Any]:
updated_inputs = dict(line_inputs or {})
for key, value in inputs.items():
if key not in updated_inputs and (value and value.default is not None):
updated_inputs[key] = value.default
return updated_inputs
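# Illustrative sketch (hypothetical input definition): given a flow input "language"
# whose FlowInputDefinition has default "en" and line_inputs {"text": "hi"}, the
# result is {"text": "hi", "language": "en"}; provided values are never overridden.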
def handle_line_failures(run_infos: List[FlowRunInfo], raise_on_line_failure: bool = False):
"""Handle line failures in batch run"""
failed = [i for i, r in enumerate(run_infos) if r.status == Status.Failed]
failed_msg = None
if len(failed) > 0:
failed_indexes = ",".join([str(i) for i in failed])
first_fail_exception = run_infos[failed[0]].error["message"]
if raise_on_line_failure:
failed_msg = "Flow run failed due to the error: " + first_fail_exception
raise Exception(failed_msg)
failed_msg = (
f"{len(failed)}/{len(run_infos)} flow run failed, indexes: [{failed_indexes}],"
f" exception of index {failed[0]}: {first_fail_exception}"
)
logger.error(failed_msg)
def get_aggregation_inputs_properties(flow: Flow) -> AbstractSet[str]:
"""Return the serialized InputAssignment of the aggregation nodes inputs.
For example, an aggregation node refers the outputs of a node named "grade",
then this function will return set("${grade.output}").
"""
normal_node_names = {node.name for node in flow.nodes if flow.is_normal_node(node.name)}
properties = set()
for node in flow.nodes:
if node.name in normal_node_names:
continue
for value in node.inputs.values():
            if value.value_type != InputValueType.NODE_REFERENCE:
continue
if value.value in normal_node_names:
properties.add(value.serialize())
return properties
def collect_lines(indexes: List[int], kvs: Mapping[str, List]) -> Mapping[str, List]:
"""Collect the values from the kvs according to the indexes."""
return {k: [v[i] for i in indexes] for k, v in kvs.items()}
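# Illustrative sketch:
# >>> collect_lines([0, 2], {"a": ["x", "y", "z"], "b": [1, 2, 3]})
# {'a': ['x', 'z'], 'b': [1, 3]}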
| promptflow/src/promptflow/promptflow/_utils/execution_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/execution_utils.py",
"repo_id": "promptflow",
"token_count": 923
} | 17 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# coding=utf-8
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
# This file is used for handwritten extensions to the generated code. Example:
# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md
def patch_sdk():
pass
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/_patch.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/_patch.py",
"repo_id": "promptflow",
"token_count": 415
} | 18 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._flows_provider_operations import build_get_index_entity_by_id_request, build_get_updated_entity_ids_for_workspace_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FlowsProviderOperations:
"""FlowsProviderOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_index_entity_by_id(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
body: Optional["_models.UnversionedEntityRequestDto"] = None,
**kwargs: Any
) -> "_models.UnversionedEntityResponseDto":
"""get_index_entity_by_id.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~flow.models.UnversionedEntityRequestDto
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UnversionedEntityResponseDto, or the result of cls(response)
:rtype: ~flow.models.UnversionedEntityResponseDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UnversionedEntityResponseDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'UnversionedEntityRequestDto')
else:
_json = None
request = build_get_index_entity_by_id_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.get_index_entity_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('UnversionedEntityResponseDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_index_entity_by_id.metadata = {'url': '/flow/v1.0/flows/getIndexEntities'} # type: ignore
@distributed_trace_async
async def get_updated_entity_ids_for_workspace(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
body: Optional["_models.UnversionedRebuildIndexDto"] = None,
**kwargs: Any
) -> "_models.UnversionedRebuildResponseDto":
"""get_updated_entity_ids_for_workspace.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~flow.models.UnversionedRebuildIndexDto
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UnversionedRebuildResponseDto, or the result of cls(response)
:rtype: ~flow.models.UnversionedRebuildResponseDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UnversionedRebuildResponseDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'UnversionedRebuildIndexDto')
else:
_json = None
request = build_get_updated_entity_ids_for_workspace_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.get_updated_entity_ids_for_workspace.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('UnversionedRebuildResponseDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_updated_entity_ids_for_workspace.metadata = {'url': '/flow/v1.0/flows/rebuildIndex'} # type: ignore
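# Illustrative sketch (assumptions: the wrapping service client exposes this
# operation group as `client.flows_provider` and is constructed elsewhere in
# this package; the empty request body is a placeholder). A caller might use
# the async operations above roughly like this:
#
#     body = _models.UnversionedRebuildIndexDto()
#     result = await client.flows_provider.get_updated_entity_ids_for_workspace(
#         subscription_id, resource_group_name, workspace_name, body=body
#     )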
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flows_provider_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flows_provider_operations.py",
"repo_id": "promptflow",
"token_count": 2881
} | 19 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
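# The module-level Serializer renders the path, query, and header values used
# by the request builders below; client-side validation is disabled so that
# constraint checking is left to the service rather than the SDK.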
# fmt: off
def build_create_flow_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
experiment_id = kwargs.pop('experiment_id', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if experiment_id is not None:
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
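# Illustrative sketch (not generated code): each build_*_request helper returns
# an azure.core.rest.HttpRequest that the operations class below converts and
# runs through the client pipeline, roughly:
#
#     request = build_create_flow_request(
#         subscription_id, resource_group_name, workspace_name,
#         content_type="application/json", json=payload,
#     )
#     request = _convert_request(request)
#     request.url = client.format_url(request.url)
#     response = client._pipeline.run(request, stream=False).http_response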
def build_list_flows_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id', None) # type: Optional[str]
owned_only = kwargs.pop('owned_only', None) # type: Optional[bool]
flow_type = kwargs.pop('flow_type', None) # type: Optional[Union[str, "_models.FlowType"]]
list_view_type = kwargs.pop('list_view_type', None) # type: Optional[Union[str, "_models.ListViewType"]]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if experiment_id is not None:
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
if owned_only is not None:
query_parameters['ownedOnly'] = _SERIALIZER.query("owned_only", owned_only, 'bool')
if flow_type is not None:
query_parameters['flowType'] = _SERIALIZER.query("flow_type", flow_type, 'str')
if list_view_type is not None:
query_parameters['listViewType'] = _SERIALIZER.query("list_view_type", list_view_type, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_flow_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
experiment_id = kwargs.pop('experiment_id') # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_patch_flow_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
experiment_id = kwargs.pop('experiment_id') # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id') # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_submit_flow_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
experiment_id = kwargs.pop('experiment_id') # type: str
endpoint_name = kwargs.pop('endpoint_name', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/submit')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
if endpoint_name is not None:
query_parameters['endpointName'] = _SERIALIZER.query("endpoint_name", endpoint_name, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_run_status_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/{flowRunId}/status')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if experiment_id is not None:
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_run_info_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id') # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_child_runs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
index = kwargs.pop('index', None) # type: Optional[int]
start_index = kwargs.pop('start_index', None) # type: Optional[int]
end_index = kwargs.pop('end_index', None) # type: Optional[int]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}/childRuns')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if index is not None:
query_parameters['index'] = _SERIALIZER.query("index", index, 'int')
if start_index is not None:
query_parameters['startIndex'] = _SERIALIZER.query("start_index", start_index, 'int')
if end_index is not None:
query_parameters['endIndex'] = _SERIALIZER.query("end_index", end_index, 'int')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_node_runs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
index = kwargs.pop('index', None) # type: Optional[int]
start_index = kwargs.pop('start_index', None) # type: Optional[int]
end_index = kwargs.pop('end_index', None) # type: Optional[int]
aggregation = kwargs.pop('aggregation', False) # type: Optional[bool]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}/nodeRuns/{nodeName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
"nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if index is not None:
query_parameters['index'] = _SERIALIZER.query("index", index, 'int')
if start_index is not None:
query_parameters['startIndex'] = _SERIALIZER.query("start_index", start_index, 'int')
if end_index is not None:
query_parameters['endIndex'] = _SERIALIZER.query("end_index", end_index, 'int')
if aggregation is not None:
query_parameters['aggregation'] = _SERIALIZER.query("aggregation", aggregation, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
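# Illustrative sketch (hypothetical identifiers; whether end_index is inclusive
# is determined by the service, not asserted here): node runs can be requested
# by a single `index` or by a start_index/end_index window, e.g.
#
#     request = build_get_flow_node_runs_request(
#         subscription_id, resource_group_name, workspace_name,
#         flow_id, flow_run_id, "my_node",
#         start_index=0, end_index=25, aggregation=False,
#     )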
def build_get_flow_node_run_base_path_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}/nodeRuns/{nodeName}/basePath')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
"nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_list_bulk_tests_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/bulkTests')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if experiment_id is not None:
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_bulk_test_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_test_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/bulkTests/{bulkTestId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"bulkTestId": _SERIALIZER.url("bulk_test_id", bulk_test_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_samples_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
use_snapshot = kwargs.pop('use_snapshot', False) # type: Optional[bool]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/samples')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if use_snapshot is not None:
query_parameters['useSnapshot'] = _SERIALIZER.query("use_snapshot", use_snapshot, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_evaluate_flow_samples_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
use_snapshot = kwargs.pop('use_snapshot', False) # type: Optional[bool]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/evaluateSamples')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if use_snapshot is not None:
query_parameters['useSnapshot'] = _SERIALIZER.query("use_snapshot", use_snapshot, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_deploy_reserved_environment_variable_names_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/DeployReservedEnvironmentVariableNames')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_deploy_flow_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
async_call = kwargs.pop('async_call', False) # type: Optional[bool]
msi_token = kwargs.pop('msi_token', False) # type: Optional[bool]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/deploy')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if async_call is not None:
query_parameters['asyncCall'] = _SERIALIZER.query("async_call", async_call, 'bool')
if msi_token is not None:
query_parameters['msiToken'] = _SERIALIZER.query("msi_token", msi_token, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_run_log_content_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}/logContent')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_cancel_flow_run_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "text/plain, application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/runs/{flowRunId}/cancel')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_cancel_flow_test_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "text/plain, application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/flowTests/{flowRunId}/cancel')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_cancel_bulk_test_run_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
bulk_test_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "text/plain, application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/bulkTests/{bulkTestRunId}/cancel')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"bulkTestRunId": _SERIALIZER.url("bulk_test_run_id", bulk_test_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_flow_snapshot_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/FlowSnapshot')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_connection_override_settings_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
runtime_name = kwargs.pop('runtime_name', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/connectionOverride')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if runtime_name is not None:
query_parameters['runtimeName'] = _SERIALIZER.query("runtime_name", runtime_name, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_inputs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/flowInputs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_load_as_component_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/LoadAsComponent')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_flow_tools_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id') # type: str
flow_runtime_name = kwargs.pop('flow_runtime_name', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/flowTools')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if flow_runtime_name is not None:
query_parameters['flowRuntimeName'] = _SERIALIZER.query("flow_runtime_name", flow_runtime_name, 'str')
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_setup_flow_session_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
experiment_id = kwargs.pop('experiment_id') # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/sessions')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_flow_session_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id') # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/sessions')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_session_status_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id') # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/sessions/status')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_flow_session_pip_packages_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
experiment_id = kwargs.pop('experiment_id') # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/sessions/pipPackages')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowId": _SERIALIZER.url("flow_id", flow_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['experimentId'] = _SERIALIZER.query("experiment_id", experiment_id, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class FlowsOperations(object):
"""FlowsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create_flow(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
experiment_id=None, # type: Optional[str]
body=None, # type: Optional["_models.CreateFlowRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.FlowDto"
"""create_flow.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~flow.models.CreateFlowRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowDto, or the result of cls(response)
:rtype: ~flow.models.FlowDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateFlowRequest')
else:
_json = None
request = build_create_flow_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
experiment_id=experiment_id,
template_url=self.create_flow.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_flow.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows'} # type: ignore
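# Illustrative usage sketch (assumptions: the wrapping client exposes this
# operation group as `client.flows`, and `flow_name` is a hypothetical
# CreateFlowRequest field shown only for shape):
#
#     flow = client.flows.create_flow(
#         subscription_id, resource_group_name, workspace_name,
#         experiment_id=experiment_id,
#         body=_models.CreateFlowRequest(flow_name="my-flow"),
#     )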
@distributed_trace
def list_flows(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
experiment_id=None, # type: Optional[str]
owned_only=None, # type: Optional[bool]
flow_type=None, # type: Optional[Union[str, "_models.FlowType"]]
list_view_type=None, # type: Optional[Union[str, "_models.ListViewType"]]
**kwargs # type: Any
):
# type: (...) -> List["_models.FlowBaseDto"]
"""list_flows.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id:
:type experiment_id: str
:param owned_only:
:type owned_only: bool
:param flow_type:
:type flow_type: str or ~flow.models.FlowType
:param list_view_type:
:type list_view_type: str or ~flow.models.ListViewType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of FlowBaseDto, or the result of cls(response)
:rtype: list[~flow.models.FlowBaseDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FlowBaseDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_flows_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
experiment_id=experiment_id,
owned_only=owned_only,
flow_type=flow_type,
list_view_type=list_view_type,
template_url=self.list_flows.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[FlowBaseDto]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_flows.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows'} # type: ignore
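# Illustrative usage sketch (the client attribute name and the "ActiveOnly"
# filter value are assumptions):
#
#     flows = client.flows.list_flows(
#         subscription_id, resource_group_name, workspace_name,
#         owned_only=True, list_view_type="ActiveOnly",
#     )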
@distributed_trace
def update_flow(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id, # type: str
body=None, # type: Optional["_models.UpdateFlowRequest"]
**kwargs # type: Any
):
# type: (...) -> str
"""update_flow.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~flow.models.UpdateFlowRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'UpdateFlowRequest')
else:
_json = None
request = build_update_flow_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
content_type=content_type,
experiment_id=experiment_id,
json=_json,
template_url=self.update_flow.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_flow.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}'} # type: ignore
@distributed_trace
def patch_flow(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id, # type: str
body=None, # type: Optional["_models.PatchFlowRequest"]
**kwargs # type: Any
):
# type: (...) -> str
"""patch_flow.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~flow.models.PatchFlowRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json-patch+json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'PatchFlowRequest')
else:
_json = None
request = build_patch_flow_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
content_type=content_type,
experiment_id=experiment_id,
json=_json,
template_url=self.patch_flow.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
patch_flow.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}'} # type: ignore
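# Note: unlike the other operations in this group, patch_flow defaults its
# Content-Type to "application/json-patch+json", so the serialized
# PatchFlowRequest is sent as a JSON Patch payload unless the caller overrides
# content_type explicitly.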
@distributed_trace
def get_flow(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FlowDto"
"""get_flow.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowDto, or the result of cls(response)
:rtype: ~flow.models.FlowDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
experiment_id=experiment_id,
template_url=self.get_flow.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}'} # type: ignore
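# Hedged sketch: reading a flow back with the same assumed ``client.flows``
# handle as above; placeholder IDs.
#
#     flow_dto = client.flows.get_flow(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         flow_id="<flow-id>",
#         experiment_id="<experiment-id>",
#     )  # deserialized as ~flow.models.FlowDto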
@distributed_trace
def submit_flow(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
experiment_id, # type: str
endpoint_name=None, # type: Optional[str]
body=None, # type: Optional["_models.SubmitFlowRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.FlowRunResult"
"""submit_flow.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param experiment_id:
:type experiment_id: str
:param endpoint_name:
:type endpoint_name: str
:param body:
:type body: ~flow.models.SubmitFlowRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunResult, or the result of cls(response)
:rtype: ~flow.models.FlowRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'SubmitFlowRequest')
else:
_json = None
request = build_submit_flow_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
experiment_id=experiment_id,
json=_json,
endpoint_name=endpoint_name,
template_url=self.submit_flow.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
submit_flow.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/submit'} # type: ignore
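# Hedged sketch: submitting a flow for execution. SubmitFlowRequest fields are
# service-defined and not shown in this module; ``endpoint_name`` is optional.
#
#     run_result = client.flows.submit_flow(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         experiment_id="<experiment-id>",
#         body=_models.SubmitFlowRequest(),  # populate per the service contract
#     )  # deserialized as ~flow.models.FlowRunResult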
@distributed_trace
def get_flow_run_status(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
experiment_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.FlowRunResult"
"""get_flow_run_status.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunResult, or the result of cls(response)
:rtype: ~flow.models.FlowRunResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_status_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
experiment_id=experiment_id,
template_url=self.get_flow_run_status.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_status.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/{flowRunId}/status'} # type: ignore
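# Hedged polling sketch. The attribute carrying the status on FlowRunResult
# and the terminal status names below are assumptions, not taken from this
# module.
#
#     import time
#     while True:
#         status = client.flows.get_flow_run_status(
#             subscription_id="<subscription-id>",
#             resource_group_name="<resource-group>",
#             workspace_name="<workspace>",
#             flow_id="<flow-id>",
#             flow_run_id="<flow-run-id>",
#         )
#         if getattr(status, "flow_run_status", None) in ("Completed", "Failed", "Canceled"):
#             break
#         time.sleep(10)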
@distributed_trace
def get_flow_run_info(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
experiment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FlowRunInfo"
"""get_flow_run_info.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunInfo, or the result of cls(response)
:rtype: ~flow.models.FlowRunInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_info_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
experiment_id=experiment_id,
template_url=self.get_flow_run_info.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_info.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}'} # type: ignore
@distributed_trace
def get_flow_child_runs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
index=None, # type: Optional[int]
start_index=None, # type: Optional[int]
end_index=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> List[Any]
"""get_flow_child_runs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:param index:
:type index: int
:param start_index:
:type start_index: int
:param end_index:
:type end_index: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of any, or the result of cls(response)
:rtype: list[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[Any]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_child_runs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
index=index,
start_index=start_index,
end_index=end_index,
template_url=self.get_flow_child_runs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[object]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_child_runs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}/childRuns'} # type: ignore
@distributed_trace
def get_flow_node_runs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
node_name, # type: str
index=None, # type: Optional[int]
start_index=None, # type: Optional[int]
end_index=None, # type: Optional[int]
aggregation=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> List[Any]
"""get_flow_node_runs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:param node_name:
:type node_name: str
:param index:
:type index: int
:param start_index:
:type start_index: int
:param end_index:
:type end_index: int
:param aggregation:
:type aggregation: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of any, or the result of cls(response)
:rtype: list[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[Any]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_node_runs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
node_name=node_name,
index=index,
start_index=start_index,
end_index=end_index,
aggregation=aggregation,
template_url=self.get_flow_node_runs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[object]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_node_runs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}/nodeRuns/{nodeName}'} # type: ignore
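# Hedged sketch: node-level runs. Judging by the parameter names, ``index`` /
# ``start_index`` / ``end_index`` select a range of the node's runs; that
# reading is an inference, not documented in this module.
#
#     node_runs = client.flows.get_flow_node_runs(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         flow_id="<flow-id>",
#         flow_run_id="<flow-run-id>",
#         node_name="<node-name>",
#         start_index=0,
#         end_index=24,
#     )  # a list of plain JSON objects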
@distributed_trace
def get_flow_node_run_base_path(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FlowRunBasePath"
"""get_flow_node_run_base_path.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:param node_name:
:type node_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunBasePath, or the result of cls(response)
:rtype: ~flow.models.FlowRunBasePath
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunBasePath"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_node_run_base_path_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
node_name=node_name,
template_url=self.get_flow_node_run_base_path.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunBasePath', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_node_run_base_path.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}/nodeRuns/{nodeName}/basePath'} # type: ignore
@distributed_trace
def list_bulk_tests(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> List["_models.BulkTestDto"]
"""list_bulk_tests.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of BulkTestDto, or the result of cls(response)
:rtype: list[~flow.models.BulkTestDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.BulkTestDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_bulk_tests_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
experiment_id=experiment_id,
template_url=self.list_bulk_tests.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[BulkTestDto]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_bulk_tests.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/bulkTests'} # type: ignore
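# Hedged sketch: enumerating the bulk tests recorded for a flow.
#
#     for bulk_test in client.flows.list_bulk_tests(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         flow_id="<flow-id>",
#     ):
#         ...  # each item is a ~flow.models.BulkTestDto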
@distributed_trace
def get_bulk_test(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
bulk_test_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BulkTestDto"
"""get_bulk_test.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param bulk_test_id:
:type bulk_test_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BulkTestDto, or the result of cls(response)
:rtype: ~flow.models.BulkTestDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BulkTestDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_bulk_test_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
bulk_test_id=bulk_test_id,
template_url=self.get_bulk_test.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('BulkTestDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_bulk_test.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/bulkTests/{bulkTestId}'} # type: ignore
@distributed_trace
def get_samples(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
use_snapshot=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> Dict[str, "_models.FlowSampleDto"]
"""get_samples.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param use_snapshot:
:type use_snapshot: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: dict mapping str to FlowSampleDto, or the result of cls(response)
:rtype: dict[str, ~flow.models.FlowSampleDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Dict[str, "_models.FlowSampleDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_samples_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
use_snapshot=use_snapshot,
template_url=self.get_samples.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('{FlowSampleDto}', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_samples.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/samples'} # type: ignore
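# Hedged sketch: the samples endpoint returns a name-keyed dictionary.
#
#     samples = client.flows.get_samples(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         use_snapshot=True,
#     )
#     for name, sample in samples.items():  # sample: ~flow.models.FlowSampleDto
#         ...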
@distributed_trace
def get_evaluate_flow_samples(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
use_snapshot=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> Dict[str, "_models.FlowSampleDto"]
"""get_evaluate_flow_samples.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param use_snapshot:
:type use_snapshot: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: dict mapping str to FlowSampleDto, or the result of cls(response)
:rtype: dict[str, ~flow.models.FlowSampleDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Dict[str, "_models.FlowSampleDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_evaluate_flow_samples_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
use_snapshot=use_snapshot,
template_url=self.get_evaluate_flow_samples.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('{FlowSampleDto}', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_evaluate_flow_samples.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/evaluateSamples'} # type: ignore
@distributed_trace
def get_flow_deploy_reserved_environment_variable_names(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List[str]
"""get_flow_deploy_reserved_environment_variable_names.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of str, or the result of cls(response)
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_deploy_reserved_environment_variable_names_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.get_flow_deploy_reserved_environment_variable_names.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[str]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_deploy_reserved_environment_variable_names.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/DeployReservedEnvironmentVariableNames'} # type: ignore
@distributed_trace
def deploy_flow(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
async_call=False, # type: Optional[bool]
msi_token=False, # type: Optional[bool]
body=None, # type: Optional["_models.DeployFlowRequest"]
**kwargs # type: Any
):
# type: (...) -> str
"""deploy_flow.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param async_call:
:type async_call: bool
:param msi_token:
:type msi_token: bool
:param body:
:type body: ~flow.models.DeployFlowRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'DeployFlowRequest')
else:
_json = None
request = build_deploy_flow_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
async_call=async_call,
msi_token=msi_token,
template_url=self.deploy_flow.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
deploy_flow.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/deploy'} # type: ignore
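# Hedged sketch: deploying a flow. With ``async_call=True`` the returned string
# presumably identifies the deployment operation rather than its final result;
# that is an assumption, as is every field of DeployFlowRequest.
#
#     deployment = client.flows.deploy_flow(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         async_call=True,
#         body=_models.DeployFlowRequest(),  # populate per the service contract
#     )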
@distributed_trace
def get_flow_run_log_content(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""get_flow_run_log_content.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_log_content_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
template_url=self.get_flow_run_log_content.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_log_content.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/runs/{flowRunId}/logContent'} # type: ignore
@distributed_trace
def cancel_flow_run(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""cancel_flow_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_flow_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.cancel_flow_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_flow_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/runs/{flowRunId}/cancel'} # type: ignore
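# Hedged sketch: note that, unlike most operations above, cancellation is keyed
# by the run id alone; there is no ``flow_id`` parameter.
#
#     client.flows.cancel_flow_run(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         flow_run_id="<flow-run-id>",
#     )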
@distributed_trace
def cancel_flow_test(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""cancel_flow_test.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_flow_test_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
flow_run_id=flow_run_id,
template_url=self.cancel_flow_test.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_flow_test.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/flowTests/{flowRunId}/cancel'} # type: ignore
@distributed_trace
def cancel_bulk_test_run(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
bulk_test_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""cancel_bulk_test_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param bulk_test_run_id:
:type bulk_test_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_bulk_test_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
bulk_test_run_id=bulk_test_run_id,
template_url=self.cancel_bulk_test_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_bulk_test_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/bulkTests/{bulkTestRunId}/cancel'} # type: ignore
@distributed_trace
def get_flow_snapshot(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body=None, # type: Optional["_models.CreateFlowRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.FlowSnapshot"
"""get_flow_snapshot.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~flow.models.CreateFlowRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowSnapshot, or the result of cls(response)
:rtype: ~flow.models.FlowSnapshot
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowSnapshot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateFlowRequest')
else:
_json = None
request = build_get_flow_snapshot_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.get_flow_snapshot.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowSnapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_snapshot.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/FlowSnapshot'} # type: ignore
@distributed_trace
def get_connection_override_settings(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
runtime_name=None, # type: Optional[str]
body=None, # type: Optional["_models.FlowGraphReference"]
**kwargs # type: Any
):
# type: (...) -> List["_models.ConnectionOverrideSetting"]
"""get_connection_override_settings.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param runtime_name:
:type runtime_name: str
:param body:
:type body: ~flow.models.FlowGraphReference
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionOverrideSetting, or the result of cls(response)
:rtype: list[~flow.models.ConnectionOverrideSetting]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionOverrideSetting"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'FlowGraphReference')
else:
_json = None
request = build_get_connection_override_settings_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
runtime_name=runtime_name,
template_url=self.get_connection_override_settings.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionOverrideSetting]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_connection_override_settings.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/connectionOverride'} # type: ignore
@distributed_trace
def get_flow_inputs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body=None, # type: Optional["_models.FlowGraphReference"]
**kwargs # type: Any
):
# type: (...) -> Dict[str, "_models.FlowInputDefinition"]
"""get_flow_inputs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~flow.models.FlowGraphReference
:keyword callable cls: A custom type or function that will be passed the direct response
:return: dict mapping str to FlowInputDefinition, or the result of cls(response)
:rtype: dict[str, ~flow.models.FlowInputDefinition]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Dict[str, "_models.FlowInputDefinition"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'FlowGraphReference')
else:
_json = None
request = build_get_flow_inputs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.get_flow_inputs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('{FlowInputDefinition}', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_inputs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/flowInputs'} # type: ignore
@distributed_trace
def load_as_component(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body=None, # type: Optional["_models.LoadFlowAsComponentRequest"]
**kwargs # type: Any
):
# type: (...) -> str
"""load_as_component.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~flow.models.LoadFlowAsComponentRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'LoadFlowAsComponentRequest')
else:
_json = None
request = build_load_as_component_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.load_as_component.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
load_as_component.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/LoadAsComponent'} # type: ignore
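# Hedged sketch: registering a flow as a component. The returned string is
# likely the registered component's identifier, though this module does not
# say so; LoadFlowAsComponentRequest fields are service-defined.
#
#     component_id = client.flows.load_as_component(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         body=_models.LoadFlowAsComponentRequest(),
#     )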
@distributed_trace
def get_flow_tools(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id, # type: str
flow_runtime_name=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.FlowToolsDto"
"""get_flow_tools.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:param flow_runtime_name:
:type flow_runtime_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowToolsDto, or the result of cls(response)
:rtype: ~flow.models.FlowToolsDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowToolsDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_tools_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
experiment_id=experiment_id,
flow_runtime_name=flow_runtime_name,
template_url=self.get_flow_tools.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowToolsDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_tools.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/flowTools'} # type: ignore
@distributed_trace
def setup_flow_session(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id, # type: str
body=None, # type: Optional["_models.SetupFlowSessionRequest"]
**kwargs # type: Any
):
# type: (...) -> Any
"""setup_flow_session.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:param body:
:type body: ~flow.models.SetupFlowSessionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'SetupFlowSessionRequest')
else:
_json = None
request = build_setup_flow_session_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
content_type=content_type,
experiment_id=experiment_id,
json=_json,
template_url=self.setup_flow_session.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('object', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
setup_flow_session.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/sessions'} # type: ignore
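# Hedged sketch: session setup accepts both 200 and 202, so a 202 response is
# presumably an in-progress provisioning that can be watched with
# ``get_flow_session_status`` (defined below in this operation group).
#
#     client.flows.setup_flow_session(
#         subscription_id="<subscription-id>",
#         resource_group_name="<resource-group>",
#         workspace_name="<workspace>",
#         flow_id="<flow-id>",
#         experiment_id="<experiment-id>",
#         body=_models.SetupFlowSessionRequest(),  # populate per the service contract
#     )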
@distributed_trace
def delete_flow_session(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> Any
"""delete_flow_session.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_flow_session_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
experiment_id=experiment_id,
template_url=self.delete_flow_session.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('object', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_flow_session.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/sessions'} # type: ignore
@distributed_trace
def get_flow_session_status(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FlowSessionDto"
"""get_flow_session_status.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowSessionDto, or the result of cls(response)
:rtype: ~flow.models.FlowSessionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowSessionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_session_status_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
experiment_id=experiment_id,
template_url=self.get_flow_session_status.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowSessionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_session_status.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/sessions/status'} # type: ignore
@distributed_trace
def list_flow_session_pip_packages(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_id, # type: str
experiment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""list_flow_session_pip_packages.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_id:
:type flow_id: str
:param experiment_id:
:type experiment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_flow_session_pip_packages_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_id=flow_id,
experiment_id=experiment_id,
template_url=self.list_flow_session_pip_packages.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_flow_session_pip_packages.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Flows/{flowId}/sessions/pipPackages'} # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flows_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flows_operations.py",
"repo_id": "promptflow",
"token_count": 54883
} | 20 |
import asyncio
import contextvars
import functools
import json
from pathlib import Path
from typing import Optional, Union
import httpx
from azure.core.exceptions import HttpResponseError
from azure.storage.blob.aio import BlobServiceClient
from promptflow._sdk._constants import DEFAULT_ENCODING, DownloadedRun
from promptflow._sdk._errors import DownloadInternalError, RunNotFoundError, RunOperationError
from promptflow._sdk.entities import Run
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
logger = get_cli_sdk_logger()
class AsyncRunDownloader:
"""Download run results from the service asynchronously.
:param run: The run id.
:type run: str
:param run_ops: The run operations.
:type run_ops: ~promptflow.azure.operations.RunOperations
:param output_folder: The output folder to save the run results.
:type output_folder: Union[Path, str]
"""
IGNORED_PATTERN = ["__pycache__"]
def __init__(self, run: str, run_ops: "RunOperations", output_folder: Union[str, Path]) -> None:
self.run = run
self.run_ops = run_ops
self.datastore = run_ops._workspace_default_datastore
self.output_folder = Path(output_folder)
self.blob_service_client = self._init_blob_service_client()
self._use_flow_outputs = False # old runtime does not write debug_info output asset, use flow_outputs instead
def _init_blob_service_client(self):
logger.debug("Initializing blob service client.")
account_url = f"{self.datastore.account_name}.blob.{self.datastore.endpoint}"
return BlobServiceClient(account_url=account_url, credential=self.run_ops._credential)
async def download(self) -> str:
"""Download the run results asynchronously."""
error_msg_prefix = f"Failed to download run {self.run!r}"
try:
# pass verify=False to client to disable SSL verification.
# Source: https://github.com/encode/httpx/issues/1331
async with httpx.AsyncClient(verify=False) as client:
tasks = [
# put async functions in tasks to run in coroutines
self._download_artifacts_and_snapshot(client),
# below functions are actually synchronous functions in order to reuse code
# and use thread pool to avoid blocking the event loop
to_thread(self._download_run_metrics),
to_thread(self._download_run_logs),
]
await asyncio.gather(*tasks)
except RunNotFoundError as e:
raise RunOperationError(f"{error_msg_prefix}. Error: {e}") from e
except HttpResponseError as e:
if e.status_code == 403:
raise RunOperationError(
f"{error_msg_prefix}. User does not have permission to perform this operation on storage account "
f"{self.datastore.account_name!r} container {self.datastore.container_name!r}. "
f"Original azure blob error: {str(e)}"
)
else:
raise DownloadInternalError(f"{error_msg_prefix}. Error: {e}") from e
except Exception as e:
raise DownloadInternalError(f"{error_msg_prefix}. Error: {e}") from e
return self.output_folder.resolve().as_posix()
async def _download_artifacts_and_snapshot(self, httpx_client: httpx.AsyncClient):
run_data = await self._get_run_data_from_run_history(httpx_client)
logger.debug("Parsing run data from run history to get necessary information.")
# extract necessary information from run data
snapshot_id = run_data["runMetadata"]["properties"]["azureml.promptflow.snapshot_id"]
output_data = run_data["runMetadata"]["outputs"].get("debug_info", None)
if output_data is None:
logger.warning(
"Downloading run '%s' but the 'debug_info' output assets is not available, "
"maybe because the job ran on old version runtime, trying to get `flow_outputs` output asset instead.",
self.run,
)
self._use_flow_outputs = True
output_data = run_data["runMetadata"]["outputs"].get("flow_outputs", None)
output_asset_id = output_data["assetId"]
# save run metadata to run_metadata.json
logger.debug("Saving the run meta data.")
run_data = self.run_ops._refine_run_data_from_run_history(run_data)
run_data = Run._from_run_history_entity(run_data)
with open(self.output_folder / DownloadedRun.RUN_METADATA_FILE_NAME, "w", encoding=DEFAULT_ENCODING) as f:
json.dump(run_data._to_dict(), f, ensure_ascii=False)
async with self.blob_service_client:
container_name = self.datastore.container_name
logger.debug("Getting container client (%s) from workspace default datastore.", container_name)
container_client = self.blob_service_client.get_container_client(container_name)
async with container_client:
tasks = [
self._download_flow_artifacts(httpx_client, container_client, output_asset_id),
self._download_snapshot(httpx_client, container_client, snapshot_id),
]
await asyncio.gather(*tasks)
async def _get_run_data_from_run_history(self, client: httpx.AsyncClient):
"""Get the run data from the run history."""
logger.debug("Getting run data from run history.")
headers = self.run_ops._get_headers()
url = self.run_ops._run_history_endpoint_url + "/rundata"
payload = {
"runId": self.run,
"selectRunMetadata": True,
"selectRunDefinition": True,
"selectJobSpecification": True,
}
error_msg_prefix = "Failed to get run data from run history"
try:
response = await client.post(url, headers=headers, json=payload)
except Exception as e:
raise DownloadInternalError(f"{error_msg_prefix}. Error: {e}") from e
else:
if response.status_code == 200:
return response.json()
elif response.status_code == 404:
raise RunNotFoundError(f"{error_msg_prefix}. Run {self.run!r} not found.")
else:
raise DownloadInternalError(
f"{error_msg_prefix}. Code: {response.status_code}. Reason: {response.reason_phrase}"
)
def _download_run_metrics(
self,
):
"""Download the run metrics."""
logger.debug("Downloading run metrics.")
metrics = self.run_ops.get_metrics(self.run)
with open(self.output_folder / DownloadedRun.METRICS_FILE_NAME, "w", encoding=DEFAULT_ENCODING) as f:
json.dump(metrics, f, ensure_ascii=False)
logger.debug("Downloaded run metrics.")
async def _download_flow_artifacts(self, httpx_client: httpx.AsyncClient, container_client, output_data):
"""Download the output data."""
asset_path = await self._get_asset_path(httpx_client, output_data)
await self._download_blob_folder_from_asset_path(container_client, asset_path)
async def _download_blob_folder_from_asset_path(
self, container_client, asset_path: str, local_folder: Optional[Path] = None
):
"""Download the blob data from the data path."""
logger.debug("Downloading all blobs from data path prefix '%s'", asset_path)
if local_folder is None:
local_folder = self.output_folder
tasks = []
async for blob in container_client.list_blobs(name_starts_with=asset_path):
blob_client = container_client.get_blob_client(blob.name)
relative_path = Path(blob.name).relative_to(asset_path)
local_path = local_folder / relative_path
tasks.append(self._download_single_blob(blob_client, local_path))
await asyncio.gather(*tasks)
async def _download_single_blob(self, blob_client, local_path: Optional[Path] = None):
"""Download a single blob."""
if local_path is None:
local_path = Path(self.output_folder / blob_client.blob_name)
elif local_path.exists():
raise UserErrorException(f"Local file {local_path.resolve().as_posix()!r} already exists.")
# ignore some files
for item in self.IGNORED_PATTERN:
if item in blob_client.blob_name:
logger.warning(
"Ignoring file '%s' because it matches the ignored pattern '%s'", local_path.as_posix(), item
)
return None
logger.debug("Downloading blob '%s' to local path '%s'", blob_client.blob_name, local_path.resolve().as_posix())
local_path.parent.mkdir(parents=True, exist_ok=True)
async with blob_client:
with open(local_path, "wb") as f:
stream = await blob_client.download_blob()
async for chunk in stream.chunks():
f.write(chunk)
return local_path
async def _download_snapshot(self, httpx_client: httpx.AsyncClient, container_client, snapshot_id):
"""Download the flow snapshot."""
snapshot_urls = await self._get_flow_snapshot_urls(httpx_client, snapshot_id)
logger.debug("Downloading all snapshot blobs from snapshot urls.")
tasks = []
for url in snapshot_urls:
blob_name = url.split(self.datastore.container_name)[-1].lstrip("/")
blob_client = container_client.get_blob_client(blob_name)
relative_path = url.split(self.run)[-1].lstrip("/")
local_path = Path(self.output_folder / DownloadedRun.SNAPSHOT_FOLDER / relative_path)
tasks.append(self._download_single_blob(blob_client, local_path))
await asyncio.gather(*tasks)
async def _get_flow_snapshot_urls(self, httpx_client: httpx.AsyncClient, snapshot_id):
logger.debug("Getting flow snapshot blob urls from snapshot id with calling to content service.")
headers = self.run_ops._get_headers()
endpoint = self.run_ops._run_history_endpoint_url.replace("/history/v1.0", "/content/v2.0")
url = endpoint + "/snapshots/sas"
payload = {
"snapshotOrAssetId": snapshot_id,
}
error_msg_prefix = (
f"Failed to download flow snapshots with snapshot id {snapshot_id}, "
f"because the client failed to retrieve data from content service"
)
try:
response = await httpx_client.post(url, headers=headers, json=payload)
except Exception as e:
raise DownloadInternalError(f"{error_msg_prefix}. Error: {e}") from e
else:
if response.status_code == 200:
return self._parse_snapshot_response(response.json())
elif response.status_code == 404:
raise DownloadInternalError(f"{error_msg_prefix}. Error: Snapshot id not found.")
else:
raise DownloadInternalError(
f"{error_msg_prefix}. Code: {response.status_code}. Reason: {response.reason_phrase}"
)
async def _get_asset_path(self, client: httpx.AsyncClient, asset_id):
"""Get the asset path from asset id."""
logger.debug("Getting asset path from asset id with calling to data service.")
headers = self.run_ops._get_headers()
endpoint = self.run_ops._run_history_endpoint_url.replace("/history", "/data")
url = endpoint + "/dataversion/getByAssetId"
payload = {
"value": asset_id,
}
error_msg_prefix = "Failed to download flow artifacts due to failed to retrieve data from data service"
try:
response = await client.post(url, headers=headers, json=payload)
except Exception as e:
raise DownloadInternalError(f"{error_msg_prefix}. Error: {e}") from e
if response.status_code != 200:
raise DownloadInternalError(
f"{error_msg_prefix}. Code: {response.status_code}. Reason: {response.reason_phrase}"
)
response_data = response.json()
data_path = response_data["dataVersion"]["dataUri"].split("/paths/")[-1]
if self._use_flow_outputs:
data_path = data_path.replace("flow_outputs", "flow_artifacts")
return data_path
def _parse_snapshot_response(self, response: dict):
"""Parse the snapshot response."""
urls = []
if response["absoluteUrl"]:
urls.append(response["absoluteUrl"])
for value in response["children"].values():
urls += self._parse_snapshot_response(value)
return urls
def _download_run_logs(self):
"""Download the run logs."""
logger.debug("Downloading run logs.")
logs = self.run_ops._get_log(self.run)
with open(self.output_folder / DownloadedRun.LOGS_FILE_NAME, "w", encoding=DEFAULT_ENCODING) as f:
f.write(logs)
logger.debug("Downloaded run logs.")
@classmethod
def _from_run_operations(cls, run_ops: "RunOperations", run: str, output_folder: Union[str, Path]):
"""Create an instance from run operations."""
from azure.ai.ml.entities._datastore.azure_storage import AzureBlobDatastore
datastore = run_ops._workspace_default_datastore
if isinstance(datastore, AzureBlobDatastore):
return cls(run=run, run_ops=run_ops, output_folder=output_folder)
else:
raise UserErrorException(
f"Cannot download run {run!r} because the workspace default datastore is not supported. Supported ones "
f"are ['AzureBlobDatastore'], got {type(datastore).__name__!r}."
)
async def to_thread(func, /, *args, **kwargs):
# this is copied from asyncio.to_thread() in Python 3.9
# as it is not available in Python 3.8, which is the minimum supported version of promptflow
loop = asyncio.get_running_loop()
ctx = contextvars.copy_context()
func_call = functools.partial(ctx.run, func, *args, **kwargs)
return await loop.run_in_executor(None, func_call)
| promptflow/src/promptflow/promptflow/azure/operations/_async_run_downloader.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/operations/_async_run_downloader.py",
"repo_id": "promptflow",
"token_count": 6052
} | 21 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass, is_dataclass
from promptflow._core.tools_manager import register_connections
from promptflow._sdk.entities import (
AzureContentSafetyConnection,
AzureOpenAIConnection,
CognitiveSearchConnection,
CustomConnection,
FormRecognizerConnection,
OpenAIConnection,
SerpConnection,
CustomStrongTypeConnection,
)
from promptflow._sdk.entities._connection import _Connection
from promptflow.contracts.types import Secret
@dataclass
class BingConnection:
api_key: Secret
url: str = "https://api.bing.microsoft.com/v7.0/search"
# We should use unified connection class everywhere.
# Do not add new connection class definition directly here.
# !!!Attention!!!: Do not add external package connections here.
__all__ = [
"OpenAIConnection",
"AzureOpenAIConnection",
"AzureContentSafetyConnection",
"SerpConnection",
"CognitiveSearchConnection",
"FormRecognizerConnection",
"CustomConnection",
"CustomStrongTypeConnection",
]
register_connections(
[v for v in globals().values() if is_dataclass(v) or (isinstance(v, type) and issubclass(v, _Connection))]
)
| promptflow/src/promptflow/promptflow/connections/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/connections/__init__.py",
"repo_id": "promptflow",
"token_count": 399
} | 22 |
import inspect
from typing import Any, Callable, Dict, List, Mapping
from promptflow._utils.logger_utils import flow_logger
from promptflow.contracts.flow import InputAssignment, InputValueType, Node
from promptflow.executor import _input_assignment_parser
class DAGManager:
def __init__(self, nodes: List[Node], flow_inputs: dict):
self._nodes = nodes
self._flow_inputs = flow_inputs
self._pending_nodes = {node.name: node for node in nodes}
self._completed_nodes_outputs = {} # node name -> output
self._bypassed_nodes = {} # node name -> node
# TODO: Validate the DAG to avoid circular dependencies
@property
def completed_nodes_outputs(self) -> Dict[str, Any]:
return self._completed_nodes_outputs
@property
def bypassed_nodes(self) -> Dict[str, Node]:
return self._bypassed_nodes
def pop_ready_nodes(self) -> List[Node]:
"""Returns a list of node names that are ready, and removes them from the list of nodes to be processed."""
ready_nodes: List[Node] = []
for node in self._pending_nodes.values():
if self._is_node_ready(node):
ready_nodes.append(node)
for node in ready_nodes:
del self._pending_nodes[node.name]
return ready_nodes
def pop_bypassable_nodes(self) -> List[Node]:
"""Returns a list of nodes that are bypassed, and removes them from the list of nodes to be processed."""
# Confirm node should be bypassed
bypassed_nodes: List[Node] = []
for node in self._pending_nodes.values():
if self._is_node_ready(node) and self._is_node_bypassable(node):
self._bypassed_nodes[node.name] = node
bypassed_nodes.append(node)
for node in bypassed_nodes:
del self._pending_nodes[node.name]
return bypassed_nodes
def get_node_valid_inputs(self, node: Node, f: Callable) -> Mapping[str, Any]:
"""Returns the valid inputs for the node, including the flow inputs, literal values and
the outputs of completed nodes. The valid inputs are determined by the function of the node.
:param node: The node for which to determine the valid inputs.
:type node: Node
:param f: The function of the current node, which is used to determine the valid inputs.
In the case when node dependency is bypassed, the input is not required when parameter has default value,
and the input is set to None when parameter has no default value.
:type f: Callable
:return: A dictionary mapping each valid input name to its value.
:rtype: dict
"""
results = {}
signature = inspect.signature(f).parameters
for name, i in (node.inputs or {}).items():
if self._is_node_dependency_bypassed(i):
# If the parameter has default value, the input will not be set so that the default value will be used.
if signature.get(name) is not None and signature[name].default is not inspect.Parameter.empty:
continue
# If the parameter has no default value, the input will be set to None so that function will not fail.
else:
flow_logger.warning(
f"The node '{i.value}' referenced by the input '{name}' of the current node '{node.name}' "
"has been bypassed, and no default value is set. Will use 'None' as the value for this input."
)
results[name] = None
else:
results[name] = self._get_node_dependency_value(i)
return results
def complete_nodes(self, nodes_outputs: Mapping[str, Any]):
"""Marks nodes as completed with the mapping from node names to their outputs."""
self._completed_nodes_outputs.update(nodes_outputs)
def completed(self) -> bool:
"""Returns True if all nodes have been processed."""
return all(
node.name in self._completed_nodes_outputs or node.name in self._bypassed_nodes for node in self._nodes
)
def _is_node_ready(self, node: Node) -> bool:
"""Returns True if the node is ready to be executed."""
node_dependencies = [i for i in node.inputs.values()]
# Add activate conditions as node dependencies
if node.activate:
node_dependencies.append(node.activate.condition)
for node_dependency in node_dependencies:
if (
node_dependency.value_type == InputValueType.NODE_REFERENCE
and node_dependency.value not in self._completed_nodes_outputs
and node_dependency.value not in self._bypassed_nodes
):
return False
return True
def _is_node_bypassable(self, node: Node) -> bool:
"""Returns True if the node should be bypassed."""
# Bypass node if the activate condition is not met
if node.activate:
# If the node referenced by activate condition is bypassed, the current node should be bypassed
if self._is_node_dependency_bypassed(node.activate.condition):
flow_logger.info(
f"The node '{node.name}' will be bypassed because it depends on the node "
f"'{node.activate.condition.value}' which has already been bypassed in the activate config."
)
return True
# If a node has activate config, we will always use this config
# to determine whether the node should be bypassed.
activate_condition = InputAssignment.serialize(node.activate.condition)
if not self._is_condition_met(node.activate.condition, node.activate.condition_value):
flow_logger.info(
f"The node '{node.name}' will be bypassed because the activate condition is not met, "
f"i.e. '{activate_condition}' is not equal to '{node.activate.condition_value}'."
)
return True
else:
flow_logger.info(
f"The node '{node.name}' will be executed because the activate condition is met, "
f"i.e. '{activate_condition}' is equal to '{node.activate.condition_value}'."
)
return False
# Bypass node if all of its node reference dependencies are bypassed
node_dependencies = [i for i in node.inputs.values() if i.value_type == InputValueType.NODE_REFERENCE]
all_dependencies_bypassed = node_dependencies and all(
self._is_node_dependency_bypassed(dependency) for dependency in node_dependencies
)
if all_dependencies_bypassed:
node_dependencies_list = [dependency.value for dependency in node_dependencies]
flow_logger.info(
f"The node '{node.name}' will be bypassed because all nodes "
f"{node_dependencies_list} it depends on are bypassed."
)
return all_dependencies_bypassed
def _is_condition_met(self, condition: InputAssignment, condition_value) -> bool:
condition = self._get_node_dependency_value(condition)
return condition == condition_value
def _get_node_dependency_value(self, node_dependency: InputAssignment):
return _input_assignment_parser.parse_value(node_dependency, self._completed_nodes_outputs, self._flow_inputs)
def _is_node_dependency_bypassed(self, dependency: InputAssignment) -> bool:
"""Returns True if the node dependency is bypassed.
There are two types of the node dependency:
1. The inputs of the node
2. The activate condition of the node
"""
return dependency.value_type == InputValueType.NODE_REFERENCE and dependency.value in self._bypassed_nodes
| promptflow/src/promptflow/promptflow/executor/_dag_manager.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_dag_manager.py",
"repo_id": "promptflow",
"token_count": 3269
} | 23 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
from typing import Any, Mapping
from pydantic import BaseModel
from promptflow.contracts.run_mode import RunMode
from promptflow.executor._service._errors import FlowFilePathInvalid
class BaseExecutionRequest(BaseModel):
run_id: str
working_dir: Path
flow_file: Path
output_dir: Path
connections: Mapping[str, Any] = None
environment_variables: Mapping[str, Any] = None
log_path: str
def get_run_mode(self):
raise NotImplementedError(f"Request type {self.__class__.__name__} is not implemented.")
def validate_request(self):
if self.flow_file.is_absolute():
raise FlowFilePathInvalid(
message_format=(
"The flow file path ({flow_file}) is invalid. The path should be relative to the working directory."
),
flow_file=self.flow_file.as_posix(),
)
class FlowExecutionRequest(BaseExecutionRequest):
inputs: Mapping[str, Any] = None
def get_run_mode(self):
return RunMode.Test
class NodeExecutionRequest(BaseExecutionRequest):
node_name: str
flow_inputs: Mapping[str, Any] = None
dependency_nodes_outputs: Mapping[str, Any] = None
def get_run_mode(self):
return RunMode.SingleNode
| promptflow/src/promptflow/promptflow/executor/_service/contracts/execution_request.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_service/contracts/execution_request.py",
"repo_id": "promptflow",
"token_count": 528
} | 24 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
import re
from pathlib import Path
from typing import Any, Match, cast
from setuptools import find_packages, setup
PACKAGE_NAME = "promptflow"
PACKAGE_FOLDER_PATH = Path(__file__).parent / "promptflow"
with open(os.path.join(PACKAGE_FOLDER_PATH, "_version.py"), encoding="utf-8") as f:
version = cast(Match[Any], re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE)).group(1)
with open("README.md", encoding="utf-8") as f:
readme = f.read()
with open("CHANGELOG.md", encoding="utf-8") as f:
changelog = f.read()
REQUIRES = [
"psutil", # get process information when bulk run
"httpx>=0.25.1", # used to send http requests asynchronously
"openai", # promptflow._core.api_injector
"flask>=2.2.3,<4.0.0", # Serving endpoint requirements
"sqlalchemy>=1.4.48,<3.0.0", # sqlite requirements
# note that pandas 1.5.3 is the only version to test in ci before promptflow 0.1.0b7 is released
# and pandas 2.x.x will be the only version to test in ci after that.
"pandas>=1.5.3,<3.0.0", # load data requirements
"python-dotenv>=1.0.0,<2.0.0", # control plane sdk requirements, to load .env file
"keyring>=24.2.0,<25.0.0", # control plane sdk requirements, to access system keyring service
"pydash>=6.0.0,<8.0.0", # control plane sdk requirements, to support parameter overrides in schema.
# vulnerability: https://github.com/advisories/GHSA-5cpq-8wj7-hf2v
"cryptography>=41.0.3,<42.0.0", # control plane sdk requirements to support connection encryption
"colorama>=0.4.6,<0.5.0", # producing colored terminal text for testing chat flow
"tabulate>=0.9.0,<1.0.0", # control plane sdk requirements, to print table in console
"filelock>=3.4.0,<4.0.0", # control plane sdk requirements, to lock for multiprocessing
# We need to pin the version due to the issue: https://github.com/hwchase17/langchain/issues/5113
"marshmallow>=3.5,<4.0.0",
"gitpython>=3.1.24,<4.0.0", # used git info to generate flow id
"tiktoken>=0.4.0",
"strictyaml>=1.5.0,<2.0.0", # used to identify exact location of validation error
"waitress>=2.1.2,<3.0.0", # used to serve local service
"opencensus-ext-azure<2.0.0", # configure opencensus to send telemetry to azure monitor
"ruamel.yaml>=0.17.10,<1.0.0", # used to generate connection templates with preserved comments
"pyarrow>=14.0.1,<15.0.0", # used to read parquet file with pandas.read_parquet
"pillow>=10.1.0,<11.0.0", # used to generate icon data URI for package tool
"filetype>=1.2.0", # used to detect the mime type for mulitmedia input
"jsonschema>=4.0.0,<5.0.0", # used to validate tool
"docutils", # used to generate description for tools
"opentelemetry-exporter-otlp-proto-http>=1.22.0,<2.0.0", # trace support
"flask-restx>=1.2.0,<2.0.0", # PFS Swagger
]
setup(
name=PACKAGE_NAME,
version=version,
description="Prompt flow Python SDK - build high-quality LLM apps",
long_description_content_type="text/markdown",
long_description=readme + "\n\n" + changelog,
license="MIT License",
author="Microsoft Corporation",
author_email="[email protected]",
url="https://github.com/microsoft/promptflow",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires="<4.0,>=3.8",
install_requires=REQUIRES,
extras_require={
"azure": [
"azure-core>=1.26.4,<2.0.0",
"azure-storage-blob[aio]>=12.13.0,<13.0.0", # add [aio] for async run download feature
"azure-identity>=1.12.0,<2.0.0",
"azure-ai-ml>=1.11.0,<2.0.0",
"pyjwt>=2.4.0,<3.0.0", # requirement of control plane SDK
],
"executable": ["pyinstaller>=5.13.2", "streamlit>=1.26.0", "streamlit-quill<0.1.0", "bs4"],
"azureml-serving": [
# AzureML connection dependencies
"azure-identity>=1.12.0,<2.0.0",
"azure-ai-ml>=1.11.0,<2.0.0",
"azure-monitor-opentelemetry-exporter>=1.0.0b21,<2.0.0",
# MDC dependencies for monitoring
"azureml-ai-monitoring>=0.1.0b3,<1.0.0",
],
"executor-service": [
"fastapi>=0.109.0,<1.0.0", # used to build web executor server
],
},
packages=find_packages(),
scripts=["pf", "pf.bat"],
entry_points={
"console_scripts": [
"pfazure = promptflow._cli._pf_azure.entry:main",
"pfs = promptflow._sdk._service.entry:main",
],
},
include_package_data=True,
project_urls={
"Bug Reports": "https://github.com/microsoft/promptflow/issues",
"Source": "https://github.com/microsoft/promptflow",
},
)
| promptflow/src/promptflow/setup.py/0 | {
"file_path": "promptflow/src/promptflow/setup.py",
"repo_id": "promptflow",
"token_count": 2281
} | 25 |
# Create run with automatic runtime
A prompt flow runtime provides computing resources that are required for the application to run, including a Docker image that contains all necessary dependency packages. This reliable and scalable runtime environment enables prompt flow to efficiently execute its tasks and functions for a seamless user experience.
If you're a new user, we recommend that you use the automatic runtime (preview). You can easily customize the environment by adding packages to a requirements.txt file, referenced from flow.dag.yaml, in the flow folder.
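For example, a minimal `requirements.txt` might look like the sketch below; the package names and versions are illustrative, not required by prompt flow:

```txt
# extra dependencies for this flow; pin versions for reproducible sessions
bs4==0.0.1
tiktoken>=0.4.0
```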
## Create a run with automatic runtime
Creating a run with the automatic runtime is simple: just omit the `runtime` field and the system will use the automatic runtime to create a session for execution.
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
```bash
pfazure run create --flow path/to/flow --data path/to/data --stream
```
:::
:::{tab-item} SDK
:sync: SDK
```python
from promptflow.azure import PFClient
pf = PFClient(
credential=credential,
subscription_id="<SUBSCRIPTION_ID>", # this will look like xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name="<RESOURCE_GROUP>",
workspace_name="<AML_WORKSPACE_NAME>",
)
pf.run(
flow=flow,
data=data,
)
```
:::
::::
## Specify pip requirements for automatic runtime
If a `requirements.txt` file exists in the same folder as `flow.dag.yaml`,
the dependencies in it will be installed automatically for the automatic runtime.
You can also specify which requirements file to use in `flow.dag.yaml` like this:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
python_requirements_txt: path/to/requirement/file
...
```
See the [Flow YAML Schema](../../../reference/flow-yaml-schema-reference.md) for details.
## Customize automatic runtime
With the automatic runtime, you can also specify the instance type. If you don't specify one, Azure Machine Learning chooses an instance type (VM size) based on factors like quota, cost, performance, and disk size; learn more about [serverless compute](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-serverless-compute).
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json
flow: <path_to_flow>
data: <path_to_flow>/data.jsonl
column_mapping:
url: ${data.url}
# defining the instance type only works for the automatic runtime; it will be ignored if you specify a runtime name.
resources:
instance_type: <instance_type>
```
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
```bash
pfazure run create --file run.yml
```
:::
:::{tab-item} SDK
:sync: SDK
```python
from promptflow import load_run
run = load_run(source="run.yml")
pf = PFClient(
credential=credential,
subscription_id="<SUBSCRIPTION_ID>", # this will look like xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name="<RESOURCE_GROUP>",
workspace_name="<AML_WORKSPACE_NAME>",
)
pf.runs.create_or_update(
run=run
)
```
:::
::::
## Next steps
- Try the example [here](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/run-management/cloud-run-management.ipynb).
| promptflow/docs/cloud/azureai/quick-start/create-run-with-automatic-runtime.md/0 | {
"file_path": "promptflow/docs/cloud/azureai/quick-start/create-run-with-automatic-runtime.md",
"repo_id": "promptflow",
"token_count": 990
} | 0 |
# Deploy a flow using Docker
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental).
:::
There are two steps to deploy a flow using docker:
1. Build the flow as docker format.
2. Build and run the docker image.
## Build a flow as docker format
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
Use the command below to build a flow as docker format:
```bash
pf flow build --source <path-to-your-flow-folder> --output <your-output-dir> --format docker
```
:::
:::{tab-item} VS Code Extension
:sync: VSC
In visual editor, choose:
![img](../../media/how-to-guides/vscode_export.png)
Click the button below to build a flow as docker format:
![img](../../media/how-to-guides/vscode_export_as_docker.png)
:::
::::
Note that all dependent connections must be created before exporting the flow as docker format.
### Docker format folder structure
Exported Dockerfile & its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files
- ...
- connections: the folder contains yaml files to create all related connections
- ...
- Dockerfile: the dockerfile to build the image
- start.sh: the script used in `CMD` of `Dockerfile` to start the service
- runit: the folder contains all the runit scripts
- ...
- settings.json: a json file to store the settings of the docker image
- README.md: Simple introduction of the files
## Deploy with Docker
We are going to use the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/) as
an example to show how to deploy with docker.
Please ensure you have [created the connections](../manage-connections.md#create-a-connection) required by the flow; if not, you can
refer to [Setup connection for web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification).
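As a quick sketch, such a connection can be created from a yaml definition with the CLI; the yaml file name below is illustrative and depends on where you saved the connection spec:

```bash
# create (or update) the open_ai_connection used by the web-classification flow
pf connection create -f open_ai_connection.yaml --set api_key=<your-api-key>
```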
## Build a flow as docker format app
Use the command below to build a flow as docker format app:
```bash
pf flow build --source ../../flows/standard/web-classification --output dist --format docker
```
Note that all dependent connections must be created before exporting the flow as docker format.
### Build Docker image
As with any Dockerfile, you need to build the image first. You can tag the image with any name you want. In this example, we use `web-classification-serve`.
Run the command below to build image:
```bash
docker build dist -t web-classification-serve
```
### Run Docker image
Running the docker image will start a service that serves the flow inside the container.
#### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in containers.
Secrets in connections won't be exported directly. Instead, we will export them as a reference to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
You'll need to set up the environment variables in the container to make the connections work.
### Run with `docker run`
You can run the docker image directly with the commands below:
```bash
# The started service will listen on port 8080. You can map the port to any port on the host machine as you want.
docker run -p 8080:8080 -e OPEN_AI_CONNECTION_API_KEY=<secret-value> web-classification-serve
```
### Test the endpoint
After starting the service, you can use curl to test it:
```bash
curl http://localhost:8080/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
## Next steps
- Try the example [here](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/flow-deploy/docker).
- See how to [deploy a flow using kubernetes](deploy-using-kubernetes.md).
| promptflow/docs/how-to-guides/deploy-a-flow/deploy-using-docker.md/0 | {
"file_path": "promptflow/docs/how-to-guides/deploy-a-flow/deploy-using-docker.md",
"repo_id": "promptflow",
"token_count": 1143
} | 1 |
# Develop a tool
We provide guides on how to develop a tool and use it.
```{toctree}
:maxdepth: 1
:hidden:
create-and-use-tool-package
add-a-tool-icon
add-category-and-tags-for-tool
use-file-path-as-tool-input
customize_an_llm_tool
create-cascading-tool-inputs
create-your-own-custom-strong-type-connection
create-dynamic-list-tool-input
```
| promptflow/docs/how-to-guides/develop-a-tool/index.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-tool/index.md",
"repo_id": "promptflow",
"token_count": 130
} | 2 |
# Integrations
The Integrations section contains documentation on custom extensions created by the community that expand prompt flow's capabilities.
These include tools that enrich flows, as well as tutorials on innovative ways to use prompt flow.
```{toctree}
:maxdepth: 1
tools/index
llms/index
``` | promptflow/docs/integrations/index.md/0 | {
"file_path": "promptflow/docs/integrations/index.md",
"repo_id": "promptflow",
"token_count": 74
} | 3 |
# Prompt
## Introduction
The Prompt Tool in PromptFlow offers a collection of textual templates that serve as a starting point for creating prompts.
These templates, based on the Jinja2 template engine, facilitate the definition of prompts. The tool proves useful
when prompt tuning is required before feeding the prompts into the large language model (LLM) in PromptFlow.
## Inputs
| Name | Type | Description | Required |
|--------------------|--------|----------------------------------------------------------|----------|
| prompt | string | The prompt template in Jinja | Yes |
| Inputs | - | List of variables in the prompt template and their assignments | - |
## Outputs
The prompt text rendered from the prompt template and the assigned inputs.
## How to write Prompt?
1. Prepare jinja template. Learn more about [Jinja](https://jinja.palletsprojects.com/en/3.1.x/)
_In the example below, the prompt incorporates Jinja templating syntax to dynamically generate the welcome message and personalize it based on the user's name. It also presents a menu of options for the user to choose from. Depending on whether the user_name variable is provided, it either addresses the user by name or uses a generic greeting._
```jinja
Welcome to {{ website_name }}!
{% if user_name %}
Hello, {{ user_name }}!
{% else %}
Hello there!
{% endif %}
Please select an option from the menu below:
1. View your account
2. Update personal information
3. Browse available products
4. Contact customer support
```
2. Assign value for the variables.
_In the example above, two variables are automatically detected and listed in the '**Inputs**' section. Assign values to them._
### Sample 1
Inputs
| Variable | Type | Sample Value |
|---------------|--------|--------------|
| website_name | string | "Microsoft" |
| user_name | string | "Jane" |
Outputs
```
Welcome to Microsoft! Hello, Jane! Please select an option from the menu below: 1. View your account 2. Update personal information 3. Browse available products 4. Contact customer support
```
### Sample 2
Inputs
| Variable | Type | Sample Value |
|--------------|--------|----------------|
| website_name | string | "Bing" |
| user_name    | string | "" |
Outputs
```
Welcome to Bing! Hello there! Please select an option from the menu below: 1. View your account 2. Update personal information 3. Browse available products 4. Contact customer support
``` | promptflow/docs/reference/tools-reference/prompt-tool.md/0 | {
"file_path": "promptflow/docs/reference/tools-reference/prompt-tool.md",
"repo_id": "promptflow",
"token_count": 783
} | 4 |
import functools
import json
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
class ChatInputList(list):
"""
ChatInputList is a list of ChatInput objects. It is used to override the __str__ method of list to return a string
that can be easily parsed as a message list.
"""
def __init__(self, iterable=None):
super().__init__(iterable or [])
def __str__(self):
return "\n".join(map(str, self))
def validate_role(role: str, valid_roles: List[str] = None):
if not valid_roles:
valid_roles = ["assistant", "function", "user", "system"]
if role not in valid_roles:
valid_roles_str = ','.join([f'\'{role}:\\n\'' for role in valid_roles])
error_message = (
f"The Chat API requires a specific format for prompt definition, and the prompt should include separate "
f"lines as role delimiters: {valid_roles_str}. Current parsed role '{role}'"
f" does not meet the requirement. If you intend to use the Completion API, please select the appropriate"
f" API type and deployment name. If you do intend to use the Chat API, please refer to the guideline at "
f"https://aka.ms/pfdoc/chat-prompt or view the samples in our gallery that contain 'Chat' in the name."
)
raise ChatAPIInvalidRole(message=error_message)
def validate_functions(functions):
function_example = json.dumps({
"name": "function_name",
"parameters": {
"type": "object",
"properties": {
"parameter_name": {
"type": "integer",
"description": "parameter_description"
}
}
},
"description": "function_description"
})
common_tsg = f"Here is a valid function example: {function_example}. See more details at " \
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions " \
"or view sample 'How to use functions with chat models' in our gallery."
if len(functions) == 0:
raise ChatAPIInvalidFunctions(message=f"functions cannot be an empty list. {common_tsg}")
else:
for i, function in enumerate(functions):
# validate if the function is a dict
if not isinstance(function, dict):
raise ChatAPIInvalidFunctions(message=f"function {i} '{function}' is not a dict. {common_tsg}")
# validate if has required keys
for key in ["name", "parameters"]:
if key not in function.keys():
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function}' does not have '{key}' property. {common_tsg}")
# validate if the parameters is a dict
if not isinstance(function["parameters"], dict):
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters '{function['parameters']}' "
f"should be described as a JSON Schema object. {common_tsg}")
# validate if the parameters has required keys
for key in ["type", "properties"]:
if key not in function["parameters"].keys():
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters '{function['parameters']}' "
f"does not have '{key}' property. {common_tsg}")
# validate if the parameters type is object
if function["parameters"]["type"] != "object":
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters 'type' "
f"should be 'object'. {common_tsg}")
# validate if the parameters properties is a dict
if not isinstance(function["parameters"]["properties"], dict):
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters 'properties' "
f"should be described as a JSON Schema object. {common_tsg}")
def try_parse_name_and_content(role_prompt):
# customer can add ## in front of name/content for markdown highlight.
# and we still support name/content without ## prefix for backward compatibility.
pattern = r"\n*#{0,2}\s*name:\n+\s*(\S+)\s*\n*#{0,2}\s*content:\n?(.*)"
match = re.search(pattern, role_prompt, re.DOTALL)
if match:
return match.group(1), match.group(2)
return None
def parse_chat(chat_str, images: List = None, valid_roles: List[str] = None):
if not valid_roles:
valid_roles = ["system", "user", "assistant", "function"]
# openai chat api only supports below roles.
# customer can add single # in front of role name for markdown highlight.
# and we still support role name without # prefix for backward compatibility.
separator = r"(?i)^\s*#?\s*(" + "|".join(valid_roles) + r")\s*:\s*\n"
images = images or []
hash2images = {str(x): x for x in images}
chunks = re.split(separator, chat_str, flags=re.MULTILINE)
chat_list = []
for chunk in chunks:
last_message = chat_list[-1] if len(chat_list) > 0 else None
if last_message and "role" in last_message and "content" not in last_message:
parsed_result = try_parse_name_and_content(chunk)
if parsed_result is None:
# "name" is required if the role is "function"
if last_message["role"] == "function":
raise ChatAPIFunctionRoleInvalidFormat(
message="Failed to parse function role prompt. Please make sure the prompt follows the "
"format: 'name:\\nfunction_name\\ncontent:\\nfunction_content'. "
"'name' is required if role is function, and it should be the name of the function "
"whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, "
"with a maximum length of 64 characters. See more details in "
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-name "
"or view sample 'How to use functions with chat models' in our gallery.")
# "name" is optional for other role types.
else:
last_message["content"] = to_content_str_or_list(chunk, hash2images)
else:
last_message["name"] = parsed_result[0]
last_message["content"] = to_content_str_or_list(parsed_result[1], hash2images)
else:
if chunk.strip() == "":
continue
# Check if prompt follows chat api message format and has valid role.
# References: https://platform.openai.com/docs/api-reference/chat/create.
role = chunk.strip().lower()
validate_role(role, valid_roles=valid_roles)
new_message = {"role": role}
chat_list.append(new_message)
return chat_list
def to_content_str_or_list(chat_str: str, hash2images: Mapping):
chat_str = chat_str.strip()
chunks = chat_str.split("\n")
include_image = False
result = []
for chunk in chunks:
if chunk.strip() in hash2images:
image_message = {}
image_message["type"] = "image_url"
image_url = hash2images[chunk.strip()].source_url \
if hasattr(hash2images[chunk.strip()], "source_url") else None
if not image_url:
image_bs64 = hash2images[chunk.strip()].to_base64()
image_mime_type = hash2images[chunk.strip()]._mime_type
image_url = {"url": f"data:{image_mime_type};base64,{image_bs64}"}
image_message["image_url"] = image_url
result.append(image_message)
include_image = True
elif chunk.strip() == "":
continue
else:
result.append({"type": "text", "text": chunk})
return result if include_image else chat_str
def handle_openai_error(tries: int = 10, delay: float = 8.0):
"""
A decorator function that used to handle OpenAI error.
OpenAI Error falls into retriable vs non-retriable ones.
For retriable error, the decorator use below parameters to control its retry activity with exponential backoff:
`tries` : max times for the function invocation, type is int
'delay': base delay seconds for exponential delay, type is float
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for i in range(tries + 1):
try:
return func(*args, **kwargs)
except (SystemErrorException, UserErrorException) as e:
# Throw inner wrapped exception directly
raise e
except (APIStatusError, APIConnectionError) as e:
# Handle retriable exception, please refer to
# https://platform.openai.com/docs/guides/error-codes/api-errors
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
if isinstance(e, APIConnectionError) and not isinstance(e, APITimeoutError) \
and "connection aborted" not in str(e).lower():
raise WrappedOpenAIError(e)
# Retry InternalServerError(>=500), RateLimitError(429), UnprocessableEntityError(422)
if isinstance(e, APIStatusError):
status_code = e.response.status_code
if status_code < 500 and status_code not in [429, 422]:
raise WrappedOpenAIError(e)
if isinstance(e, RateLimitError) and getattr(e, "type", None) == "insufficient_quota":
# Exit retry if this is quota insufficient error
print(f"{type(e).__name__} with insufficient quota. Throw user error.", file=sys.stderr)
raise WrappedOpenAIError(e)
if i == tries:
# Exit retry if max retry reached
print(f"{type(e).__name__} reached max retry. Exit retry with user error.", file=sys.stderr)
raise ExceedMaxRetryTimes(e)
if hasattr(e, 'response') and e.response is not None:
retry_after_in_header = e.response.headers.get("retry-after", None)
else:
retry_after_in_header = None
if not retry_after_in_header:
retry_after_seconds = delay * (2 ** i)
msg = (
f"{type(e).__name__} #{i}, but no Retry-After header, "
+ f"Back off {retry_after_seconds} seconds for retry."
)
print(msg, file=sys.stderr)
else:
retry_after_seconds = float(retry_after_in_header) * (2 ** i)
msg = (
f"{type(e).__name__} #{i}, Retry-After={retry_after_in_header}, "
f"Back off {retry_after_seconds} seconds for retry."
)
print(msg, file=sys.stderr)
time.sleep(retry_after_seconds)
except OpenAIError as e:
# For other non-retriable errors from OpenAIError,
# For example, AuthenticationError, APIConnectionError, BadRequestError, NotFoundError
# Mark UserError for all the non-retriable OpenAIError
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
raise WrappedOpenAIError(e)
except Exception as e:
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
error_message = f"OpenAI API hits exception: {type(e).__name__}: {str(e)}"
raise LLMError(message=error_message)
return wrapper
return decorator
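# Illustrative usage of the decorator above (the function below is hypothetical):
#
# @handle_openai_error(tries=3, delay=2.0)
# def create_completion(connection, prompt):
#     ...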
def to_bool(value) -> bool:
return str(value).lower() == "true"
def render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs):
try:
return Template(prompt, trim_blocks=trim_blocks, keep_trailing_newline=keep_trailing_newline).render(**kwargs)
except Exception as e:
# For exceptions raised by jinja2 module, mark UserError
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
error_message = f"Failed to render jinja template: {type(e).__name__}: {str(e)}. " \
+ "Please modify your prompt to fix the issue."
raise JinjaTemplateError(message=error_message) from e
def process_function_call(function_call):
if function_call is None:
param = "auto"
elif function_call == "auto" or function_call == "none":
param = function_call
else:
function_call_example = json.dumps({"name": "function_name"})
common_tsg = f"Here is a valid example: {function_call_example}. See the guide at " \
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call " \
"or view sample 'How to call functions with chat models' in our gallery."
param = function_call
if not isinstance(param, dict):
raise ChatAPIInvalidFunctions(
message=f"function_call parameter '{param}' must be a dict, but not {type(function_call)}. {common_tsg}"
)
else:
if "name" not in function_call:
raise ChatAPIInvalidFunctions(
message=f'function_call parameter {json.dumps(param)} must contain "name" field. {common_tsg}'
)
return param
def post_process_chat_api_response(completion, stream, functions):
if stream:
if functions is not None:
error_message = "Function calling has not been supported by stream mode yet."
raise FunctionCallNotSupportedInStreamMode(message=error_message)
def generator():
for chunk in completion:
if chunk.choices:
yield chunk.choices[0].delta.content if hasattr(chunk.choices[0].delta, 'content') and \
chunk.choices[0].delta.content is not None else ""
# We must return the generator object rather than yielding directly here.
# Otherwise, the function itself would become a generator, regardless of whether stream is True or False.
return generator()
else:
# When calling functions, the function_call response is returned as a field in the message, so we need to
# return the message directly. Otherwise, we only return the content.
if functions is not None:
return completion.model_dump()["choices"][0]["message"]
else:
# chat api may return message with no content.
return getattr(completion.choices[0].message, "content", "")
def preprocess_template_string(template_string: str) -> str:
"""Remove the image input decorator from the template string and place the image input in a new line."""
pattern = re.compile(r'\!\[(\s*image\s*)\]\(\{\{(\s*[^\s{}]+\s*)\}\}\)')
# Find all matches in the input string
matches = pattern.findall(template_string)
# Perform substitutions
for match in matches:
original = f"![{match[0]}]({{{{{match[1]}}}}})"
replacement = f"\n{{{{{match[1]}}}}}\n"
template_string = template_string.replace(original, replacement)
return template_string
def convert_to_chat_list(obj):
if isinstance(obj, dict):
return {key: convert_to_chat_list(value) for key, value in obj.items()}
elif isinstance(obj, list):
return ChatInputList([convert_to_chat_list(item) for item in obj])
else:
return obj
def add_referenced_images_to_set(value, image_set, image_type):
if isinstance(value, image_type):
image_set.add(value)
elif isinstance(value, list):
for item in value:
add_referenced_images_to_set(item, image_set, image_type)
elif isinstance(value, dict):
for _, item in value.items():
add_referenced_images_to_set(item, image_set, image_type)
def find_referenced_image_set(kwargs: dict):
referenced_images = set()
try:
from promptflow.contracts.multimedia import Image
for _, value in kwargs.items():
add_referenced_images_to_set(value, referenced_images, Image)
except ImportError:
pass
return referenced_images
def normalize_connection_config(connection):
"""
Normalizes the configuration of a given connection object for compatibility.
This function takes a connection object and normalizes its configuration,
ensuring it is compatible and standardized for use.
"""
if isinstance(connection, AzureOpenAIConnection):
return {
"api_key": connection.api_key,
"api_version": connection.api_version,
"azure_endpoint": connection.api_base
}
elif isinstance(connection, OpenAIConnection):
return {
"api_key": connection.api_key,
"organization": connection.organization,
"base_url": connection.base_url
}
else:
error_message = f"Not Support connection type '{type(connection).__name__}'. " \
f"Connection type should be in [AzureOpenAIConnection, OpenAIConnection]."
raise InvalidConnectionType(message=error_message)
| promptflow/src/promptflow-tools/promptflow/tools/common.py/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/common.py",
"repo_id": "promptflow",
"token_count": 8205
} | 5 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import json
from functools import partial
from promptflow._cli._params import (
add_param_all_results,
add_param_max_results,
add_param_set,
add_param_yes,
base_params,
)
from promptflow._cli._utils import activate_action, confirm, exception_handler, get_secret_input, print_yellow_warning
from promptflow._sdk._constants import MAX_LIST_CLI_RESULTS
from promptflow._sdk._load_functions import load_connection
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._connection import _Connection
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import load_yaml
logger = get_cli_sdk_logger()
_client = None
def _get_pf_client():
global _client
if _client is None:
_client = PFClient()
return _client
def add_param_file(parser):
parser.add_argument("--file", "-f", type=str, help="File path of the connection yaml.", required=True)
def add_param_name(parser, required=False):
parser.add_argument("--name", "-n", type=str, help="Name of the connection.", required=required)
def add_connection_parser(subparsers):
connection_parser = subparsers.add_parser(
"connection",
description="""A CLI tool to manage connections for promptflow.
Your secrets will be encrypted using AES (Advanced Encryption Standard) technology.""", # noqa: E501
help="pf connection",
)
subparsers = connection_parser.add_subparsers()
add_connection_create(subparsers)
add_connection_update(subparsers)
add_connection_show(subparsers)
add_connection_list(subparsers)
add_connection_delete(subparsers)
connection_parser.set_defaults(action="connection")
def add_connection_create(subparsers):
# Do not change the indent of epilog
epilog = """
Examples:
# Creating a connection with yaml file:
pf connection create -f connection.yaml
# Creating a connection with yaml file and overrides:
pf connection create -f connection.yaml --set api_key="my_api_key"
# Creating a custom connection with .env file, note that overrides specified by --set will be ignored:
pf connection create -f .env --name custom
"""
activate_action(
name="create",
description="Create a connection.",
epilog=epilog,
add_params=[add_param_set, add_param_file, add_param_name] + base_params,
subparsers=subparsers,
help_message="Create a connection.",
action_param_name="sub_action",
)
def add_connection_update(subparsers):
epilog = """
Examples:
# Updating a connection:
pf connection update -n my_connection --set api_key="my_api_key"
"""
activate_action(
name="update",
description="Update a connection.",
epilog=epilog,
add_params=[add_param_set, partial(add_param_name, required=True)] + base_params,
subparsers=subparsers,
help_message="Update a connection.",
action_param_name="sub_action",
)
def add_connection_show(subparsers):
epilog = """
Examples:
# Get and show a connection:
pf connection show -n my_connection_name
"""
activate_action(
name="show",
description="Show a connection for promptflow.",
epilog=epilog,
add_params=[partial(add_param_name, required=True)] + base_params,
subparsers=subparsers,
help_message="Show a connection for promptflow.",
action_param_name="sub_action",
)
def add_connection_delete(subparsers):
epilog = """
Examples:
# Delete a connection:
pf connection delete -n my_connection_name
"""
activate_action(
name="delete",
description="Delete a connection with specific name.",
epilog=epilog,
add_params=[partial(add_param_name, required=True), add_param_yes] + base_params,
subparsers=subparsers,
help_message="Delete a connection with specific name.",
action_param_name="sub_action",
)
def add_connection_list(subparsers):
epilog = """
Examples:
# List all connections:
pf connection list
"""
activate_action(
name="list",
description="List all connections.",
epilog=epilog,
add_params=[add_param_max_results, add_param_all_results] + base_params,
subparsers=subparsers,
help_message="List all connections.",
action_param_name="sub_action",
)
def validate_and_interactive_get_secrets(connection, is_update=False):
"""Validate the connection and interactive get secrets if no secrets is provided."""
prompt = "=================== Please input required secrets ==================="
missing_secrets_prompt = False
for name, val in connection.secrets.items():
if not _Connection._is_scrubbed_value(val) and not _Connection._is_user_input_value(val):
            # Not a scrubbed value; does not require user input.
continue
if is_update and _Connection._is_scrubbed_value(val):
            # Scrubbed value; the existing value will be reused, no user input required.
continue
if not missing_secrets_prompt:
print(prompt)
missing_secrets_prompt = True
while True:
secret = get_secret_input(prompt=f"{name}: ")
if secret:
break
print_yellow_warning("Secret can't be empty.")
connection.secrets[name] = secret
if missing_secrets_prompt:
print("=================== Required secrets collected ===================")
return connection
# Note the connection secrets value behaviors:
# --------------------------------------------------------------------------------
# | secret value | CLI create | CLI update | SDK create_or_update |
# --------------------------------------------------------------------------------
# | empty or all "*" | prompt input | use existing values | use existing values |
# | <no-change> | prompt input | use existing values | use existing values |
# | <user-input> | prompt input | prompt input | raise error |
# --------------------------------------------------------------------------------
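# Editor-added sketch of the table above (connection shape is illustrative, not from this module):
# given a connection.yaml whose secret is scrubbed, e.g. `api_key: "******"`,
#   pf connection create -f connection.yaml        # prompts for api_key
#   pf connection update -n my_conn --set ...      # silently reuses the stored api_key
# while SDK create_or_update likewise reuses existing values for scrubbed secrets.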
@exception_handler("Connection create")
def create_connection(file_path, params_override=None, name=None):
params_override = params_override or []
if name:
params_override.append({"name": name})
connection = load_connection(source=file_path, params_override=params_override)
existing_connection = _get_pf_client().connections.get(connection.name, raise_error=False)
if existing_connection:
logger.warning(f"Connection with name {connection.name} already exists. Updating it.")
# Note: We don't set the existing secret back here, let user input the secrets.
validate_and_interactive_get_secrets(connection)
connection = _get_pf_client().connections.create_or_update(connection)
print(json.dumps(connection._to_dict(), indent=4))
@exception_handler("Connection show")
def show_connection(name):
connection = _get_pf_client().connections.get(name)
print(json.dumps(connection._to_dict(), indent=4))
@exception_handler("Connection list")
def list_connection(max_results=MAX_LIST_CLI_RESULTS, all_results=False):
connections = _get_pf_client().connections.list(max_results, all_results)
print(json.dumps([connection._to_dict() for connection in connections], indent=4))
def _upsert_connection_from_file(file, params_override=None):
# Note: This function is used for pfutil, do not edit it.
params_override = params_override or []
params_override.append(load_yaml(file))
connection = load_connection(source=file, params_override=params_override)
existing_connection = _get_pf_client().connections.get(connection.name, raise_error=False)
if existing_connection:
connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
validate_and_interactive_get_secrets(connection, is_update=True)
# Set the secrets not scrubbed, as _to_dict() dump scrubbed connections.
connection._secrets = existing_connection._secrets
else:
validate_and_interactive_get_secrets(connection)
connection = _get_pf_client().connections.create_or_update(connection)
return connection
@exception_handler("Connection update")
def update_connection(name, params_override=None):
params_override = params_override or []
existing_connection = _get_pf_client().connections.get(name)
connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
validate_and_interactive_get_secrets(connection, is_update=True)
# Set the secrets not scrubbed, as _to_dict() dump scrubbed connections.
connection._secrets = existing_connection._secrets
connection = _get_pf_client().connections.create_or_update(connection)
print(json.dumps(connection._to_dict(), indent=4))
@exception_handler("Connection delete")
def delete_connection(name, skip_confirm: bool = False):
if confirm("Are you sure you want to perform this operation?", skip_confirm):
_get_pf_client().connections.delete(name)
else:
print("The delete operation was canceled.")
def dispatch_connection_commands(args: argparse.Namespace):
if args.sub_action == "create":
create_connection(args.file, args.params_override, args.name)
elif args.sub_action == "show":
show_connection(args.name)
elif args.sub_action == "list":
list_connection(args.max_results, args.all_results)
elif args.sub_action == "update":
update_connection(args.name, args.params_override)
elif args.sub_action == "delete":
delete_connection(args.name, args.yes)
| promptflow/src/promptflow/promptflow/_cli/_pf/_connection.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf/_connection.py",
"repo_id": "promptflow",
"token_count": 3450
} | 6 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from collections import namedtuple
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import pydash
from dotenv import load_dotenv
from tabulate import tabulate
from promptflow._sdk._constants import CLIListOutputFormat, EnvironmentVariables
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException
AzureMLWorkspaceTriad = namedtuple("AzureMLWorkspace", ["subscription_id", "resource_group_name", "workspace_name"])
logger = get_cli_sdk_logger()
def _set_workspace_argument_for_subparsers(subparser, required=False):
"""Add workspace arguments to subparsers."""
# Make these arguments optional so that user can use local azure cli context
subparser.add_argument(
"--subscription", required=required, type=str, help="Subscription id, required when pass run id."
)
subparser.add_argument(
"--resource-group", "-g", required=required, type=str, help="Resource group name, required when pass run id."
)
subparser.add_argument(
"--workspace-name", "-w", required=required, type=str, help="Workspace name, required when pass run id."
)
def dump_connection_file(dot_env_file: str):
for key in ["AZURE_OPENAI_API_KEY", "AZURE_OPENAI_API_BASE", "CHAT_DEPLOYMENT_NAME"]:
if key not in os.environ:
# skip dump connection file if not all required environment variables are set
return
connection_file_path = "./connection.json"
os.environ["PROMPTFLOW_CONNECTIONS"] = connection_file_path
load_dotenv(dot_env_file)
connection_dict = {
"custom_connection": {
"type": "CustomConnection",
"value": {
"AZURE_OPENAI_API_KEY": os.environ["AZURE_OPENAI_API_KEY"],
"AZURE_OPENAI_API_BASE": os.environ["AZURE_OPENAI_API_BASE"],
"CHAT_DEPLOYMENT_NAME": os.environ["CHAT_DEPLOYMENT_NAME"],
},
"module": "promptflow.connections",
}
}
with open(connection_file_path, "w") as f:
json.dump(connection_dict, f)
def get_workspace_triad_from_local() -> AzureMLWorkspaceTriad:
subscription_id = None
resource_group_name = None
workspace_name = None
azure_config_path = Path.home() / ".azure"
config_parser = ConfigParser()
# subscription id
try:
config_parser.read_file(open(azure_config_path / "clouds.config"))
subscription_id = config_parser["AzureCloud"]["subscription"]
except Exception: # pylint: disable=broad-except
pass
# resource group name & workspace name
try:
config_parser.read_file(open(azure_config_path / "config"))
resource_group_name = config_parser["defaults"]["group"]
workspace_name = config_parser["defaults"]["workspace"]
except Exception: # pylint: disable=broad-except
pass
return AzureMLWorkspaceTriad(subscription_id, resource_group_name, workspace_name)
def get_credentials_for_cli():
"""
This function is part of mldesigner.dsl._dynamic_executor.DynamicExecutor._get_ml_client with
some local imports.
"""
from azure.ai.ml.identity import AzureMLOnBehalfOfCredential
from azure.identity import AzureCliCredential, DefaultAzureCredential, ManagedIdentityCredential
    # May return a different credential when executing locally
# credential priority: OBO > azure cli > managed identity > default
# check OBO via environment variable, the referenced code can be found from below search:
# https://msdata.visualstudio.com/Vienna/_search?text=AZUREML_OBO_ENABLED&type=code&pageSize=25&filters=ProjectFilters%7BVienna%7D&action=contents
if os.getenv(IdentityEnvironmentVariable.OBO_ENABLED_FLAG):
logger.debug("User identity is configured, use OBO credential.")
credential = AzureMLOnBehalfOfCredential()
elif _use_azure_cli_credential():
logger.debug("Use azure cli credential since specified in environment variable.")
credential = AzureCliCredential()
else:
client_id_from_env = os.getenv(IdentityEnvironmentVariable.DEFAULT_IDENTITY_CLIENT_ID)
if client_id_from_env:
# use managed identity when client id is available from environment variable.
# reference code:
# https://learn.microsoft.com/en-us/azure/machine-learning/how-to-identity-based-service-authentication?tabs=cli#compute-cluster
logger.debug("Use managed identity credential.")
credential = ManagedIdentityCredential(client_id=client_id_from_env)
elif is_in_ci_pipeline():
# use managed identity when executing in CI pipeline.
logger.debug("Use azure cli credential since in CI pipeline.")
credential = AzureCliCredential()
else:
# use default Azure credential to handle other cases.
logger.debug("Use default credential.")
credential = DefaultAzureCredential()
return credential
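# Editor-added usage sketch: forcing the Azure CLI branch above via the opt-in
# environment variable (assuming the constant's value matches its name):
#
#   import os
#   os.environ["PF_USE_AZURE_CLI_CREDENTIAL"] = "true"
#   credential = get_credentials_for_cli()  # -> AzureCliCredential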
def get_client_info_for_cli(subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None):
if not (subscription_id and resource_group_name and workspace_name):
workspace_triad = get_workspace_triad_from_local()
subscription_id = subscription_id or workspace_triad.subscription_id
resource_group_name = resource_group_name or workspace_triad.resource_group_name
workspace_name = workspace_name or workspace_triad.workspace_name
if not (subscription_id and resource_group_name and workspace_name):
workspace_name = workspace_name or os.getenv("AZUREML_ARM_WORKSPACE_NAME")
subscription_id = subscription_id or os.getenv("AZUREML_ARM_SUBSCRIPTION")
resource_group_name = resource_group_name or os.getenv("AZUREML_ARM_RESOURCEGROUP")
return subscription_id, resource_group_name, workspace_name
def get_client_for_cli(*, subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None):
from azure.ai.ml import MLClient
subscription_id, resource_group_name, workspace_name = get_client_info_for_cli(
subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name
)
missing_fields = []
for key in ["workspace_name", "subscription_id", "resource_group_name"]:
if not locals()[key]:
missing_fields.append(key)
if missing_fields:
raise UserErrorException(
"Please provide all required fields to work on specific workspace: {}".format(", ".join(missing_fields)),
target=ErrorTarget.CONTROL_PLANE_SDK,
)
return MLClient(
credential=get_credentials_for_cli(),
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
)
def confirm(question, skip_confirm) -> bool:
if skip_confirm:
return True
answer = input(f"{question} [y/n]")
while answer.lower() not in ["y", "n"]:
answer = input("Please input 'y' or 'n':")
return answer.lower() == "y"
@contextlib.contextmanager
def inject_sys_path(path):
original_sys_path = sys.path.copy()
sys.path.insert(0, str(path))
try:
yield
finally:
sys.path = original_sys_path
def activate_action(name, description, epilog, add_params, subparsers, help_message, action_param_name="action"):
parser = subparsers.add_parser(
name,
description=description,
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
help=help_message,
)
if add_params:
for add_param_func in add_params:
add_param_func(parser)
parser.set_defaults(**{action_param_name: name})
return parser
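# Editor-added sketch: wiring a hypothetical "hello" action through activate_action,
# mirroring how the pf CLI builds its subcommands:
#
#   parser = argparse.ArgumentParser(prog="pf")
#   subparsers = parser.add_subparsers()
#   activate_action(
#       name="hello",
#       description="Say hello.",
#       epilog=None,
#       add_params=[lambda p: p.add_argument("--name", default="world")],
#       subparsers=subparsers,
#       help_message="Say hello.",
#   )
#   args = parser.parse_args(["hello", "--name", "pf"])
#   assert args.action == "hello" and args.name == "pf"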
class IdentityEnvironmentVariable:
"""This class is copied from mldesigner._constants.IdentityEnvironmentVariable."""
DEFAULT_IDENTITY_CLIENT_ID = "DEFAULT_IDENTITY_CLIENT_ID"
OBO_ENABLED_FLAG = "AZUREML_OBO_ENABLED"
def _dump_entity_with_warnings(entity) -> Dict:
if not entity:
return
if isinstance(entity, Dict):
return entity
try:
return entity._to_dict() # type: ignore
except Exception as err:
logger.warning("Failed to deserialize response: " + str(err))
logger.warning(str(entity))
logger.debug(traceback.format_exc())
def list_of_dict_to_dict(obj: list):
if not isinstance(obj, list):
return {}
result = {}
for item in obj:
result.update(item)
return result
def list_of_dict_to_nested_dict(obj: list):
result = {}
for item in obj:
for keys, value in item.items():
keys = keys.split(".")
pydash.set_(result, keys, value)
return result
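# Editor-added worked example for the two converters above:
#
#   list_of_dict_to_dict([{"a": 1}, {"b": 2}])           # -> {"a": 1, "b": 2}
#   list_of_dict_to_nested_dict([{"a.b": 1, "a.c": 2}])  # -> {"a": {"b": 1, "c": 2}}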
def _build_sorted_column_widths_tuple_list(
columns: List[str],
values1: Dict[str, int],
values2: Dict[str, int],
margins: Dict[str, int],
) -> List[Tuple[str, int]]:
res = []
for column in columns:
value = max(values1[column], values2[column]) + margins[column]
res.append((column, value))
res.sort(key=lambda x: x[1], reverse=True)
return res
def _assign_available_width(
column_expected_widths: List[Tuple[str, int]],
available_width: int,
column_assigned_widths: Dict[str, int],
average_width: Optional[int] = None,
) -> Tuple[int, Dict[str, int]]:
for column, expected_width in column_expected_widths:
if available_width <= 0:
break
target = average_width if average_width is not None else column_assigned_widths[column]
delta = expected_width - target
if delta <= available_width:
column_assigned_widths[column] = expected_width
available_width -= delta
else:
column_assigned_widths[column] += available_width
available_width = 0
return available_width, column_assigned_widths
def _calculate_column_widths(df: "DataFrame", terminal_width: int) -> List[int]:
num_rows, num_columns = len(df), len(df.columns)
index_column_width = max(len(str(num_rows)) + 2, 4) # tabulate index column min width is 4
terminal_width_buffer = 10
available_width = terminal_width - terminal_width_buffer - index_column_width - (num_columns + 2)
avg_available_width = available_width // num_columns
header_widths, content_avg_widths, content_max_widths, column_margin = {}, {}, {}, {}
for column in df.columns:
header_widths[column] = len(column)
contents = []
for value in df[column]:
contents.append(len(str(value)))
content_avg_widths[column] = sum(contents) // len(contents)
content_max_widths[column] = max(contents)
        # if the header is longer than the longest content, the margin is 4; otherwise it is 2,
        # so we record this for every column
if header_widths[column] >= content_max_widths[column]:
column_margin[column] = 4
else:
column_margin[column] = 2
column_widths = {}
# first round: try to meet the average(or column header) width
# record columns that need more width, we will deal with them in second round if we still have width
round_one_left_columns = []
for column in df.columns:
expected_width = max(header_widths[column], content_avg_widths[column]) + column_margin[column]
if avg_available_width <= expected_width:
column_widths[column] = avg_available_width
round_one_left_columns.append(column)
else:
column_widths[column] = expected_width
current_available_width = available_width - sum(column_widths.values())
if current_available_width > 0:
# second round: assign left available width to those columns that need more
# assign with greedy, sort recorded columns first from longest to shortest;
# iterate and try to meet each column's expected width
column_avg_tuples = _build_sorted_column_widths_tuple_list(
round_one_left_columns, header_widths, content_avg_widths, column_margin
)
current_available_width, column_widths = _assign_available_width(
column_avg_tuples, current_available_width, column_widths, avg_available_width
)
if current_available_width > 0:
# third round: if there are still left available width, assign to try to meet the max width
# still use greedy, sort first and iterate through all columns
column_max_tuples = _build_sorted_column_widths_tuple_list(
df.columns, header_widths, content_max_widths, column_margin
)
current_available_width, column_widths = _assign_available_width(
column_max_tuples, current_available_width, column_widths
)
max_col_widths = [index_column_width] # index column
max_col_widths += [max(column_widths[column] - column_margin[column], 1) for column in df.columns] # sub margin
return max_col_widths
def pretty_print_dataframe_as_table(df: "DataFrame") -> None:
# try to get terminal window width
try:
terminal_width = shutil.get_terminal_size().columns
except Exception: # pylint: disable=broad-except
terminal_width = 120 # default value for Windows Terminal launch size columns
column_widths = _calculate_column_widths(df, terminal_width)
print(tabulate(df, headers="keys", tablefmt="grid", maxcolwidths=column_widths, maxheadercolwidths=column_widths))
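# Editor-added usage sketch (assumes pandas is installed, as elsewhere in this module):
#
#   import pandas as pd
#   df = pd.DataFrame([{"name": "run_1", "status": "Completed"}])
#   pretty_print_dataframe_as_table(df)  # column widths adapt to the terminal size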
def is_format_exception():
if os.environ.get("PROMPTFLOW_STRUCTURE_EXCEPTION_OUTPUT", "false").lower() == "true":
return True
return False
def exception_handler(command: str):
"""Catch known cli exceptions."""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if is_format_exception():
# When the flag format_exception is set in command,
# it will write a json with exception info and command to stderr.
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
error_msg["command"] = " ".join(sys.argv)
sys.stderr.write(json.dumps(error_msg))
if isinstance(e, PromptflowException):
print_red_error(f"{command} failed with {e.__class__.__name__}: {str(e)}")
exit(1)
else:
raise e
return wrapper
return decorator
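# Editor-added sketch of the decorator above with structured error output enabled:
#
#   os.environ["PROMPTFLOW_STRUCTURE_EXCEPTION_OUTPUT"] = "true"
#
#   @exception_handler("Demo command")
#   def fail():
#       raise UserErrorException("boom")
#
#   fail()  # writes a JSON error payload to stderr, prints a red error, exits 1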
def get_secret_input(prompt, mask="*"):
"""Get secret input with mask printed on screen in CLI.
Provide better handling for control characters:
- Handle Ctrl-C as KeyboardInterrupt
- Ignore control characters and print warning message.
"""
if not isinstance(prompt, str):
raise TypeError(f"prompt must be a str, not ${type(prompt).__name__}")
if not isinstance(mask, str):
raise TypeError(f"mask argument must be a one-character str, not ${type(mask).__name__}")
if len(mask) != 1:
raise ValueError("mask argument must be a one-character str")
if sys.platform == "win32":
# For some reason, mypy reports that msvcrt doesn't have getch, ignore this warning:
from msvcrt import getch # type: ignore
else: # macOS and Linux
import termios
import tty
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
secret_input = []
sys.stdout.write(prompt)
sys.stdout.flush()
while True:
key = ord(getch())
if key == 13: # Enter key pressed.
sys.stdout.write("\n")
return "".join(secret_input)
elif key == 3: # Ctrl-C pressed.
raise KeyboardInterrupt()
elif key in (8, 127): # Backspace/Del key erases previous output.
if len(secret_input) > 0:
# Erases previous character.
sys.stdout.write("\b \b") # \b doesn't erase the character, it just moves the cursor back.
sys.stdout.flush()
secret_input = secret_input[:-1]
elif 0 <= key <= 31:
msg = "\nThe last user input got ignored as it is control character."
print_yellow_warning(msg)
sys.stdout.write(prompt + mask * len(secret_input))
sys.stdout.flush()
else:
# display the mask character.
char = chr(key)
sys.stdout.write(mask)
sys.stdout.flush()
secret_input.append(char)
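# Editor-added usage sketch for get_secret_input:
#
#   api_key = get_secret_input(prompt="api_key: ")  # echoes "*" per keystroke
#   # Backspace edits, Ctrl-C raises KeyboardInterrupt, other control keys warn.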
def _copy_to_flow(flow_path, source_file):
target = flow_path / source_file.name
action = "Overwriting" if target.exists() else "Creating"
if source_file.is_file():
print(f"{action} {source_file.name}...")
shutil.copy2(source_file, target)
else:
print(f"{action} {source_file.name} folder...")
shutil.copytree(source_file, target, dirs_exist_ok=True)
def _output_result_list_with_format(result_list: List[Dict], output_format: CLIListOutputFormat) -> None:
import pandas as pd
if output_format == CLIListOutputFormat.TABLE:
df = pd.DataFrame(result_list)
df.fillna("", inplace=True)
pretty_print_dataframe_as_table(df)
elif output_format == CLIListOutputFormat.JSON:
print(json.dumps(result_list, indent=4))
else:
warning_message = (
f"Unknown output format {output_format!r}, accepted values are 'json' and 'table';"
"will print using 'json'."
)
logger.warning(warning_message)
print(json.dumps(result_list, indent=4))
def _get_cli_activity_name(cli, args):
activity_name = cli
if getattr(args, "action", None):
activity_name += f".{args.action}"
if getattr(args, "sub_action", None):
activity_name += f".{args.sub_action}"
return activity_name
def _try_delete_existing_run_record(run_name: str):
from promptflow._sdk._errors import RunNotFoundError
from promptflow._sdk._orm import RunInfo as ORMRun
try:
ORMRun.delete(run_name)
except RunNotFoundError:
pass
def _use_azure_cli_credential():
return os.environ.get(EnvironmentVariables.PF_USE_AZURE_CLI_CREDENTIAL, "false").lower() == "true"
| promptflow/src/promptflow/promptflow/_cli/_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_utils.py",
"repo_id": "promptflow",
"token_count": 7653
} | 7 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow import tool
@tool
def line_process(groundtruth: str, prediction: str):
"""
This tool processes the prediction of a single line and returns the processed result.
:param groundtruth: the groundtruth of a single line.
:param prediction: the prediction of a single line.
"""
# Add your line processing logic here
processed_result = "Correct" if groundtruth.lower() == prediction.lower() else "Incorrect"
return processed_result
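# Editor-added usage sketch: the comparison above is case-insensitive.
#
#   line_process("Paris", "paris")   # -> "Correct"
#   line_process("Paris", "London")  # -> "Incorrect"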
| promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/line_process.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/line_process.py",
"repo_id": "promptflow",
"token_count": 156
} | 8 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import json
import os
from dataclasses import fields, is_dataclass
from pathlib import Path
from typing import Any, Dict, List
from promptflow._constants import CONNECTION_NAME_PROPERTY, CONNECTION_SECRET_KEYS, PROMPTFLOW_CONNECTIONS
from promptflow._sdk._constants import CustomStrongTypeConnectionConfigs
from promptflow._utils.utils import try_import
from promptflow.contracts.tool import ConnectionType
from promptflow.contracts.types import Secret
class ConnectionManager:
"""This class will be used for construction mode to run flow. Do not include it into tool code."""
instance = None
def __init__(self, _dict: Dict[str, dict] = None):
if _dict is None and PROMPTFLOW_CONNECTIONS in os.environ:
# !!! Important !!!: Do not leverage this environment variable in any production code, this is test only.
connection_path = Path(os.environ[PROMPTFLOW_CONNECTIONS]).resolve().absolute()
if not connection_path.exists():
raise ValueError(f"Connection file not exists. Path {connection_path.as_posix()}.")
_dict = json.loads(open(connection_path).read())
self._connections_dict = _dict or {}
self._connections = self._build_connections(self._connections_dict)
@classmethod
def _build_connections(cls, _dict: Dict[str, dict]):
"""Build connection dict."""
from promptflow._core.tools_manager import connections as cls_mapping
cls.import_requisites(_dict)
connections = {} # key to connection object
for key, connection_dict in _dict.items():
typ = connection_dict.get("type")
if typ not in cls_mapping:
supported = [key for key in cls_mapping.keys() if not key.startswith("_")]
raise ValueError(f"Unknown connection {key!r} type {typ!r}, supported are {supported}.")
value = connection_dict.get("value", {})
connection_class = cls_mapping[typ]
from promptflow.connections import CustomConnection
if connection_class is CustomConnection:
# Note: CustomConnection definition can not be got, secret keys will be provided in connection dict.
secret_keys = connection_dict.get("secret_keys", [])
secrets = {k: v for k, v in value.items() if k in secret_keys}
configs = {k: v for k, v in value.items() if k not in secrets}
connection_value = connection_class(configs=configs, secrets=secrets)
if CustomStrongTypeConnectionConfigs.PROMPTFLOW_TYPE_KEY in configs:
connection_value.custom_type = configs[CustomStrongTypeConnectionConfigs.PROMPTFLOW_TYPE_KEY]
else:
"""
Note: Ignore non exists keys of connection class,
because there are some keys just used by UX like resource id, while not used by backend.
"""
if is_dataclass(connection_class):
# Do not delete this branch, as promptflow_vectordb.connections is dataclass type.
cls_fields = {f.name: f for f in fields(connection_class)}
connection_value = connection_class(**{k: v for k, v in value.items() if k in cls_fields})
secret_keys = [f.name for f in cls_fields.values() if f.type == Secret]
else:
connection_value = connection_class(**{k: v for k, v in value.items()})
secrets = getattr(connection_value, "secrets", {})
secret_keys = list(secrets.keys()) if isinstance(secrets, dict) else []
# Set secret keys for log scrubbing
setattr(connection_value, CONNECTION_SECRET_KEYS, secret_keys)
# Use this hack to make sure serialization works
setattr(connection_value, CONNECTION_NAME_PROPERTY, key)
connections[key] = connection_value
return connections
@classmethod
def init_from_env(cls):
return ConnectionManager()
def get(self, connection_info: Any) -> Any:
"""Get Connection by connection info.
connection_info:
connection name as string or connection object
"""
if isinstance(connection_info, str):
return self._connections.get(connection_info)
elif ConnectionType.is_connection_value(connection_info):
return connection_info
return None
def get_secret_list(self) -> List[str]:
def secrets():
for connection in self._connections.values():
secret_keys = getattr(connection, CONNECTION_SECRET_KEYS, [])
for secret_key in secret_keys:
yield getattr(connection, secret_key)
return list(secrets())
@classmethod
def import_requisites(cls, _dict: Dict[str, dict]):
"""Import connection required modules."""
modules = set()
for key, connection_dict in _dict.items():
module = connection_dict.get("module")
if module:
modules.add(module)
for module in modules:
# Suppress import error, as we have legacy module promptflow.tools.connections.
try_import(module, f"Import connection module {module!r} failed.", raise_error=False)
@staticmethod
def is_legacy_connections(_dict: Dict[str, dict]):
"""Detect if is legacy connections. Legacy connections dict doesn't have module and type.
So import requisites can not be performed. Only request from MT will hit this.
Legacy connection example: {"aoai_config": {"api_key": "..."}}
"""
has_module = any(isinstance(v, dict) and "module" in v for k, v in _dict.items())
return not has_module
def to_connections_dict(self) -> dict:
"""Get all connections and reformat to key-values format."""
# Value returned: {"aoai_config": {"api_key": "..."}}
return copy.deepcopy(self._connections_dict)
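# Editor-added sketch of the test-only wiring described in __init__ (file shape
# follows the CustomConnection branch of _build_connections):
#
#   os.environ[PROMPTFLOW_CONNECTIONS] = "connections.json"
#   # connections.json:
#   # {"my_custom": {"type": "CustomConnection", "module": "promptflow.connections",
#   #                "value": {"api_key": "..."}, "secret_keys": ["api_key"]}}
#   manager = ConnectionManager.init_from_env()
#   conn = manager.get("my_custom")  # -> CustomConnection with api_key as a secret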
| promptflow/src/promptflow/promptflow/_core/connection_manager.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/connection_manager.py",
"repo_id": "promptflow",
"token_count": 2536
} | 9 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
import os.path
import uuid
from itertools import product
from os import PathLike
from pathlib import Path
from typing import Optional, Union
import pydash
from promptflow._sdk._constants import (
DEFAULT_ENCODING,
FLOW_DIRECTORY_MACRO_IN_CONFIG,
HOME_PROMPT_FLOW_DIR,
SERVICE_CONFIG_FILE,
ConnectionProvider,
)
from promptflow._sdk._utils import (
call_from_extension,
read_write_by_user,
gen_uuid_by_compute_info,
)
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow.exceptions import ErrorTarget, ValidationException
logger = get_cli_sdk_logger()
class ConfigFileNotFound(ValidationException):
pass
class InvalidConfigFile(ValidationException):
pass
class InvalidConfigValue(ValidationException):
pass
class Configuration(object):
CONFIG_PATH = Path(HOME_PROMPT_FLOW_DIR) / SERVICE_CONFIG_FILE
COLLECT_TELEMETRY = "telemetry.enabled"
EXTENSION_COLLECT_TELEMETRY = "extension.telemetry_enabled"
INSTALLATION_ID = "cli.installation_id"
CONNECTION_PROVIDER = "connection.provider"
RUN_OUTPUT_PATH = "run.output_path"
USER_AGENT = "user_agent"
ENABLE_INTERNAL_FEATURES = "enable_internal_features"
_instance = None
def __init__(self, overrides=None):
if not os.path.exists(self.CONFIG_PATH.parent):
os.makedirs(self.CONFIG_PATH.parent, exist_ok=True)
if not os.path.exists(self.CONFIG_PATH):
self.CONFIG_PATH.touch(mode=read_write_by_user(), exist_ok=True)
with open(self.CONFIG_PATH, "w", encoding=DEFAULT_ENCODING) as f:
dump_yaml({}, f)
self._config = load_yaml(self.CONFIG_PATH)
if not self._config:
self._config = {}
# Allow config override by kwargs
overrides = overrides or {}
for key, value in overrides.items():
self._validate(key, value)
pydash.set_(self._config, key, value)
@property
def config(self):
return self._config
@classmethod
def get_instance(cls):
"""Use this to get instance to avoid multiple copies of same global config."""
if cls._instance is None:
cls._instance = Configuration()
return cls._instance
def set_config(self, key, value):
"""Store config in file to avoid concurrent write."""
self._validate(key, value)
pydash.set_(self._config, key, value)
with open(self.CONFIG_PATH, "w", encoding=DEFAULT_ENCODING) as f:
dump_yaml(self._config, f)
def get_config(self, key):
try:
return pydash.get(self._config, key, None)
except Exception: # pylint: disable=broad-except
return None
def get_all(self):
return self._config
@classmethod
def _get_workspace_from_config(
cls,
*,
path: Union[PathLike, str] = None,
) -> str:
"""Return a workspace arm id from an existing Azure Machine Learning Workspace.
Reads workspace configuration from a file. Throws an exception if the config file can't be found.
:param path: The path to the config file or starting directory to search.
The parameter defaults to starting the search in the current directory.
:type path: str
        :return: The workspace ARM id for an existing Azure ML Workspace.
        :rtype: str
"""
from azure.ai.ml import MLClient
from azure.ai.ml._file_utils.file_utils import traverse_up_path_and_find_file
from azure.ai.ml.constants._common import AZUREML_RESOURCE_PROVIDER, RESOURCE_ID_FORMAT
path = Path(".") if path is None else Path(path)
if path.is_file():
found_path = path
else:
# Based on priority
# Look in config dirs like .azureml or plain directory
# with None
directories_to_look = [".azureml", None]
files_to_look = ["config.json"]
found_path = None
for curr_dir, curr_file in product(directories_to_look, files_to_look):
logging.debug(
"No config file directly found, starting search from %s "
"directory, for %s file name to be present in "
"%s subdirectory",
path,
curr_file,
curr_dir,
)
found_path = traverse_up_path_and_find_file(
path=path,
file_name=curr_file,
directory_name=curr_dir,
num_levels=20,
)
if found_path:
break
if not found_path:
msg = (
"We could not find config.json in: {} or in its parent directories. "
"Please provide the full path to the config file or ensure that "
"config.json exists in the parent directories."
)
raise ConfigFileNotFound(
message=msg.format(path),
no_personal_data_message=msg.format("[path]"),
target=ErrorTarget.CONTROL_PLANE_SDK,
)
subscription_id, resource_group, workspace_name = MLClient._get_workspace_info(found_path)
if not (subscription_id and resource_group and workspace_name):
raise InvalidConfigFile(
"The subscription_id, resource_group and workspace_name can not be empty. Got: "
f"subscription_id: {subscription_id}, resource_group: {resource_group}, "
f"workspace_name: {workspace_name} from file {found_path}."
)
return RESOURCE_ID_FORMAT.format(subscription_id, resource_group, AZUREML_RESOURCE_PROVIDER, workspace_name)
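    # Editor-added sketch of the config.json this method looks for (the standard
    # Azure ML workspace config shape; exact keys are assumed, not verified here):
    #
    #   {
    #       "subscription_id": "<subscription-id>",
    #       "resource_group": "<resource-group>",
    #       "workspace_name": "<workspace-name>"
    #   }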
def get_connection_provider(self, path=None) -> Optional[str]:
"""Get the current connection provider. Default to local if not configured."""
provider = self.get_config(key=self.CONNECTION_PROVIDER)
return self.resolve_connection_provider(provider, path=path)
@classmethod
def resolve_connection_provider(cls, provider, path=None) -> Optional[str]:
if provider is None:
return ConnectionProvider.LOCAL
if provider == ConnectionProvider.AZUREML.value:
# Note: The below function has azure-ai-ml dependency.
return "azureml:" + cls._get_workspace_from_config(path=path)
# If provider not None and not Azure, return it directly.
# It can be the full path of a workspace.
return provider
def get_telemetry_consent(self) -> Optional[bool]:
"""Get the current telemetry consent value. Return None if not configured."""
if call_from_extension():
return self.get_config(key=self.EXTENSION_COLLECT_TELEMETRY)
return self.get_config(key=self.COLLECT_TELEMETRY)
def set_telemetry_consent(self, value):
"""Set the telemetry consent value and store in local."""
self.set_config(key=self.COLLECT_TELEMETRY, value=value)
def get_or_set_installation_id(self):
"""Get user id if exists, otherwise set installation id and return it."""
installation_id = self.get_config(key=self.INSTALLATION_ID)
if installation_id:
return installation_id
installation_id = gen_uuid_by_compute_info()
if not installation_id:
installation_id = str(uuid.uuid4())
self.set_config(key=self.INSTALLATION_ID, value=installation_id)
return installation_id
def get_run_output_path(self) -> Optional[str]:
"""Get the run output path in local."""
return self.get_config(key=self.RUN_OUTPUT_PATH)
def _to_dict(self):
return self._config
@staticmethod
def _validate(key: str, value: str) -> None:
if key == Configuration.RUN_OUTPUT_PATH:
if value.rstrip("/").endswith(FLOW_DIRECTORY_MACRO_IN_CONFIG):
raise InvalidConfigValue(
"Cannot specify flow directory as run output path; "
"if you want to specify run output path under flow directory, "
"please use its child folder, e.g. '${flow_directory}/.runs'."
)
return
def get_user_agent(self) -> Optional[str]:
"""Get customer set user agent. If set, will add prefix `PFCustomer_`"""
user_agent = self.get_config(key=self.USER_AGENT)
if user_agent:
return f"PFCustomer_{user_agent}"
return user_agent
def is_internal_features_enabled(self) -> Optional[bool]:
"""Get enable_preview_features"""
result = self.get_config(key=self.ENABLE_INTERNAL_FEATURES)
if isinstance(result, str):
return result.lower() == "true"
return result is True
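# Editor-added usage sketch for the singleton above:
#
#   config = Configuration.get_instance()
#   config.set_config(Configuration.CONNECTION_PROVIDER, "local")  # persisted to the YAML config file
#   config.get_connection_provider()  # -> "local"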
| promptflow/src/promptflow/promptflow/_sdk/_configuration.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_configuration.py",
"repo_id": "promptflow",
"token_count": 3940
} | 10 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import time
from promptflow._sdk._serving.monitor.data_collector import FlowDataCollector
from promptflow._sdk._serving.monitor.streaming_monitor import StreamingMonitor
from promptflow._sdk._serving.monitor.metrics import MetricsRecorder, ResponseType
from promptflow._sdk._serving.utils import streaming_response_required, get_cost_up_to_now
from promptflow._sdk._serving.flow_result import FlowResult
from promptflow._utils.exception_utils import ErrorResponse
from flask import request, g
class FlowMonitor:
"""FlowMonitor is used to collect metrics & data for promptflow serving."""
def __init__(self, logger, default_flow_name, data_collector: FlowDataCollector, metrics_recorder: MetricsRecorder):
self.data_collector = data_collector
self.metrics_recorder = metrics_recorder
self.logger = logger
self.flow_name = default_flow_name
def setup_streaming_monitor_if_needed(self, response_creator, data, output):
g.streaming = response_creator.has_stream_field and response_creator.text_stream_specified_explicitly
# set streaming callback functions if the response is streaming
if g.streaming:
streaming_monitor = StreamingMonitor(
self.logger,
flow_id=g.get("flow_id", self.flow_name),
start_time=g.start_time,
inputs=data,
outputs=output,
req_id=g.get("req_id", None),
streaming_field_name=response_creator.stream_field_name,
metric_recorder=self.metrics_recorder,
data_collector=self.data_collector,
)
response_creator._on_stream_start = streaming_monitor.on_stream_start
response_creator._on_stream_end = streaming_monitor.on_stream_end
response_creator._on_stream_event = streaming_monitor.on_stream_event
self.logger.info(f"Finish stream callback setup for flow with streaming={g.streaming}.")
else:
self.logger.info("Flow does not enable streaming response.")
def handle_error(self, ex: Exception, resp_code: int):
if self.metrics_recorder:
flow_id = g.get("flow_id", self.flow_name)
err_code = ErrorResponse.from_exception(ex).innermost_error_code
streaming = g.get("streaming", False)
self.metrics_recorder.record_flow_request(flow_id, resp_code, err_code, streaming)
def start_monitoring(self):
g.start_time = time.time()
g.streaming = streaming_response_required()
# if both request_id and client_request_id are provided, each will respect their own value.
# if either one is provided, the provided one will be used for both request_id and client_request_id.
# in aml deployment, request_id is provided by aml, user can only customize client_request_id.
# in non-aml deployment, user can customize both request_id and client_request_id.
g.req_id = request.headers.get("x-request-id", None)
g.client_req_id = request.headers.get("x-ms-client-request-id", g.req_id)
g.req_id = g.req_id or g.client_req_id
self.logger.info(f"Start monitoring new request, request_id: {g.req_id}, client_request_id: {g.client_req_id}")
def finish_monitoring(self, resp_status_code):
data = g.get("data", None)
flow_result: FlowResult = g.get("flow_result", None)
req_id = g.get("req_id", None)
client_req_id = g.get("client_req_id", req_id)
flow_id = g.get("flow_id", self.flow_name)
# collect non-streaming flow request/response data
if self.data_collector and data and flow_result and flow_result.output and not g.streaming:
self.data_collector.collect_flow_data(data, flow_result.output, req_id)
if self.metrics_recorder:
if flow_result:
self.metrics_recorder.record_tracing_metrics(flow_result.run_info, flow_result.node_run_infos)
err_code = g.get("err_code", "None")
self.metrics_recorder.record_flow_request(flow_id, resp_status_code, err_code, g.streaming)
# streaming metrics will be recorded in the streaming callback func
if not g.streaming:
latency = get_cost_up_to_now(g.start_time)
self.metrics_recorder.record_flow_latency(
flow_id, resp_status_code, g.streaming, ResponseType.Default.value, latency
)
self.logger.info(f"Finish monitoring request, request_id: {req_id}, client_request_id: {client_req_id}.")
| promptflow/src/promptflow/promptflow/_sdk/_serving/monitor/flow_monitor.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/monitor/flow_monitor.py",
"repo_id": "promptflow",
"token_count": 1895
} | 11 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
from promptflow._sdk._configuration import Configuration
PROMPTFLOW_LOGGER_NAMESPACE = "promptflow._sdk._telemetry"
class TelemetryMixin(object):
def __init__(self, **kwargs):
# Need to call init for potential parent, otherwise it won't be initialized.
super().__init__(**kwargs)
def _get_telemetry_values(self, *args, **kwargs): # pylint: disable=unused-argument
"""Return the telemetry values of object.
:return: The telemetry values
:rtype: Dict
"""
return {}
class WorkspaceTelemetryMixin(TelemetryMixin):
def __init__(self, subscription_id, resource_group_name, workspace_name, **kwargs):
        # use _telemetry-prefixed attributes to avoid conflicts with subclass properties
self._telemetry_subscription_id = subscription_id
self._telemetry_resource_group_name = resource_group_name
self._telemetry_workspace_name = workspace_name
super().__init__(**kwargs)
def _get_telemetry_values(self, *args, **kwargs): # pylint: disable=unused-argument
"""Return the telemetry values of run operations.
:return: The telemetry values
:rtype: Dict
"""
return {
"subscription_id": self._telemetry_subscription_id,
"resource_group_name": self._telemetry_resource_group_name,
"workspace_name": self._telemetry_workspace_name,
}
def is_telemetry_enabled():
"""Check if telemetry is enabled. Telemetry is enabled by default.
User can disable it by:
1. running `pf config set telemetry.enabled=false` command.
"""
config = Configuration.get_instance()
telemetry_consent = config.get_telemetry_consent()
if telemetry_consent is not None:
return str(telemetry_consent).lower() == "true"
return True
def get_telemetry_logger():
from promptflow._sdk._telemetry.logging_handler import PromptFlowSDKLogHandler, get_appinsights_log_handler
current_logger = logging.getLogger(PROMPTFLOW_LOGGER_NAMESPACE)
# avoid telemetry log appearing in higher level loggers
current_logger.propagate = False
current_logger.setLevel(logging.INFO)
# check if current logger already has an appinsights handler to avoid logger handler duplication
for log_handler in current_logger.handlers:
if isinstance(log_handler, PromptFlowSDKLogHandler):
# update existing handler's config
log_handler._is_telemetry_enabled = is_telemetry_enabled()
return current_logger
    # otherwise, remove any existing handlers and create a new one
for log_handler in current_logger.handlers:
current_logger.removeHandler(log_handler)
handler = get_appinsights_log_handler()
current_logger.addHandler(handler)
return current_logger
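# Editor-added usage sketch: the returned logger is namespaced and non-propagating,
# so records go only to the App Insights handler (and are dropped if telemetry is off):
#
#   telemetry_logger = get_telemetry_logger()
#   telemetry_logger.info("some activity happened")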
| promptflow/src/promptflow/promptflow/_sdk/_telemetry/telemetry.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_telemetry/telemetry.py",
"repo_id": "promptflow",
"token_count": 1051
} | 12 |
#! /bin/bash
echo "start promptflow serving"
cd /flow
dotnet Promptflow.dll --port "8080" --yaml_path "flow.dag.yaml" --assembly_folder "." --connection_folder_path "../connections" --log_path "" --serving | promptflow/src/promptflow/promptflow/_sdk/data/docker_csharp/runit/promptflow-serve/run.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/docker_csharp/runit/promptflow-serve/run.jinja2",
"repo_id": "promptflow",
"token_count": 71
} | 13 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import datetime
import functools
import json
import uuid
from os import PathLike
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from dateutil import parser as date_parser
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._constants import (
BASE_PATH_CONTEXT_KEY,
DEFAULT_ENCODING,
DEFAULT_VARIANT,
FLOW_DIRECTORY_MACRO_IN_CONFIG,
FLOW_RESOURCE_ID_PREFIX,
PARAMS_OVERRIDE_KEY,
PROMPT_FLOW_DIR_NAME,
REGISTRY_URI_PREFIX,
REMOTE_URI_PREFIX,
RUN_MACRO,
TIMESTAMP_MACRO,
VARIANT_ID_MACRO,
AzureRunTypes,
DownloadedRun,
FlowRunProperties,
RestRunTypes,
RunDataKeys,
RunInfoSources,
RunStatus,
RunTypes,
)
from promptflow._sdk._errors import InvalidRunError, InvalidRunStatusError
from promptflow._sdk._orm import RunInfo as ORMRun
from promptflow._sdk._utils import (
_sanitize_python_variable_name,
is_remote_uri,
parse_remote_flow_pattern,
parse_variant,
)
from promptflow._sdk.entities._yaml_translatable import YAMLTranslatableMixin
from promptflow._sdk.schemas._run import RunSchema
from promptflow._utils.flow_utils import get_flow_lineage_id
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
AZURE_RUN_TYPE_2_RUN_TYPE = {
AzureRunTypes.BATCH: RunTypes.BATCH,
AzureRunTypes.EVALUATION: RunTypes.EVALUATION,
AzureRunTypes.PAIRWISE_EVALUATE: RunTypes.PAIRWISE_EVALUATE,
}
REST_RUN_TYPE_2_RUN_TYPE = {
RestRunTypes.BATCH: RunTypes.BATCH,
RestRunTypes.EVALUATION: RunTypes.EVALUATION,
RestRunTypes.PAIRWISE_EVALUATE: RunTypes.PAIRWISE_EVALUATE,
}
logger = get_cli_sdk_logger()
class Run(YAMLTranslatableMixin):
"""Flow run entity.
:param flow: Path of the flow directory.
:type flow: Path
:param name: Name of the run.
:type name: str
    :param data: Input data for the run. A local path or a remote uri (starting with azureml: or a public URL) is supported. Note: remote uri is only supported for cloud runs. # noqa: E501
:type data: Optional[str]
:param variant: Variant of the run.
:type variant: Optional[str]
:param run: Parent run or run ID.
:type run: Optional[Union[Run, str]]
:param column_mapping: Column mapping for the run. Optional since it's not stored in the database.
:type column_mapping: Optional[dict]
:param display_name: Display name of the run.
:type display_name: Optional[str]
:param description: Description of the run.
:type description: Optional[str]
:param tags: Tags of the run.
:type tags: Optional[List[Dict[str, str]]]
:param created_on: Date and time the run was created.
:type created_on: Optional[datetime.datetime]
:param start_time: Date and time the run started.
:type start_time: Optional[datetime.datetime]
:param end_time: Date and time the run ended.
:type end_time: Optional[datetime.datetime]
:param status: Status of the run.
:type status: Optional[str]
:param environment_variables: Environment variables for the run.
:type environment_variables: Optional[Dict[str, str]]
:param connections: Connections for the run.
:type connections: Optional[Dict[str, Dict]]
:param properties: Properties of the run.
:type properties: Optional[Dict[str, Any]]
:param kwargs: Additional keyword arguments.
:type kwargs: Optional[dict]
"""
def __init__(
self,
flow: Optional[Union[Path, str]] = None,
name: Optional[str] = None,
# input fields are optional since it's not stored in DB
data: Optional[str] = None,
variant: Optional[str] = None,
run: Optional[Union["Run", str]] = None,
column_mapping: Optional[dict] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[Dict[str, str]]] = None,
*,
created_on: Optional[datetime.datetime] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
status: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
connections: Optional[Dict[str, Dict]] = None,
properties: Optional[Dict[str, Any]] = None,
source: Optional[Union[Path, str]] = None,
**kwargs,
):
# TODO: remove when RUN CRUD don't depend on this
self.type = kwargs.get("type", RunTypes.BATCH)
self.data = data
self.column_mapping = column_mapping
self.display_name = display_name
self.description = description
self.tags = tags
self.variant = variant
self.run = run
self._created_on = created_on or datetime.datetime.now()
self._status = status or RunStatus.NOT_STARTED
self.environment_variables = environment_variables or {}
self.connections = connections or {}
self._properties = properties or {}
self.source = source
self._is_archived = kwargs.get("is_archived", False)
self._run_source = kwargs.get("run_source", RunInfoSources.LOCAL)
self._start_time = start_time
self._end_time = end_time
self._duration = kwargs.get("duration", None)
self._portal_url = kwargs.get(RunDataKeys.PORTAL_URL, None)
self._creation_context = kwargs.get("creation_context", None)
# init here to make sure those fields initialized in all branches.
self.flow = flow
self._use_remote_flow = is_remote_uri(flow)
self._experiment_name = None
self._lineage_id = None
if self._use_remote_flow:
self._flow_name = parse_remote_flow_pattern(flow)
self._lineage_id = self._flow_name
# default run name: flow directory name + timestamp
self.name = name or self._generate_run_name()
experiment_name = kwargs.get("experiment_name", None)
if self._run_source == RunInfoSources.LOCAL and not self._use_remote_flow:
self.flow = Path(flow).resolve().absolute()
flow_dir = self._get_flow_dir()
# sanitize flow_dir to avoid invalid experiment name
self._experiment_name = _sanitize_python_variable_name(flow_dir.name)
self._lineage_id = get_flow_lineage_id(flow_dir=flow_dir)
self._output_path = Path(
kwargs.get("output_path", self._generate_output_path(config=kwargs.get("config", None)))
)
self._flow_name = flow_dir.name
elif self._run_source == RunInfoSources.INDEX_SERVICE:
self._metrics = kwargs.get("metrics", {})
self._experiment_name = experiment_name
elif self._run_source == RunInfoSources.RUN_HISTORY:
self._error = kwargs.get("error", None)
self._output = kwargs.get("output", None)
elif self._run_source == RunInfoSources.EXISTING_RUN:
# when the run is created from an existing run folder, the output path is also the source path
self._output_path = Path(source)
self._runtime = kwargs.get("runtime", None)
self._resources = kwargs.get("resources", None)
self._outputs = kwargs.get("outputs", None)
self._command = kwargs.get("command", None)
@property
def created_on(self) -> str:
return self._created_on.isoformat()
@property
def status(self) -> str:
return self._status
@property
def properties(self) -> Dict[str, str]:
result = {}
if self._run_source == RunInfoSources.LOCAL:
# show posix path to avoid windows path escaping
result = {
FlowRunProperties.FLOW_PATH: Path(self.flow).as_posix() if not self._use_remote_flow else self.flow,
FlowRunProperties.OUTPUT_PATH: self._output_path.as_posix(),
}
if self.run:
run_name = self.run.name if isinstance(self.run, Run) else self.run
result[FlowRunProperties.RUN] = run_name
if self.variant:
result[FlowRunProperties.NODE_VARIANT] = self.variant
if self._command:
result[FlowRunProperties.COMMAND] = self._command
if self._outputs:
result[FlowRunProperties.OUTPUTS] = self._outputs
elif self._run_source == RunInfoSources.EXISTING_RUN:
result = {
FlowRunProperties.OUTPUT_PATH: Path(self.source).resolve().as_posix(),
}
return {
**result,
**self._properties,
}
@classmethod
def _from_orm_object(cls, obj: ORMRun) -> "Run":
properties_json = json.loads(str(obj.properties))
flow = properties_json.get(FlowRunProperties.FLOW_PATH, None)
# there can be two sources for orm run object:
# 1. LOCAL: Created when run is created from local flow
# 2. EXISTING_RUN: Created when run is created from existing run folder
source = None
if getattr(obj, "run_source", None) == RunInfoSources.EXISTING_RUN:
source = properties_json[FlowRunProperties.OUTPUT_PATH]
return Run(
type=obj.type,
name=str(obj.name),
flow=Path(flow) if flow else None,
source=Path(source) if source else None,
output_path=properties_json[FlowRunProperties.OUTPUT_PATH],
run=properties_json.get(FlowRunProperties.RUN, None),
variant=properties_json.get(FlowRunProperties.NODE_VARIANT, None),
display_name=obj.display_name,
description=str(obj.description) if obj.description else None,
tags=json.loads(str(obj.tags)) if obj.tags else None,
# keyword arguments
created_on=datetime.datetime.fromisoformat(str(obj.created_on)),
start_time=datetime.datetime.fromisoformat(str(obj.start_time)) if obj.start_time else None,
end_time=datetime.datetime.fromisoformat(str(obj.end_time)) if obj.end_time else None,
status=str(obj.status),
data=Path(obj.data).resolve().absolute().as_posix() if obj.data else None,
properties={FlowRunProperties.SYSTEM_METRICS: properties_json.get(FlowRunProperties.SYSTEM_METRICS, {})},
# compatible with old runs, their run_source is empty, treat them as local
run_source=obj.run_source or RunInfoSources.LOCAL,
# experiment command node only fields
command=properties_json.get(FlowRunProperties.COMMAND, None),
outputs=properties_json.get(FlowRunProperties.OUTPUTS, None),
)
@classmethod
def _from_index_service_entity(cls, run_entity: dict) -> "Run":
"""Convert run entity from index service to run object."""
# TODO(2887134): support cloud eager Run CRUD
start_time = run_entity["properties"].get("startTime", None)
end_time = run_entity["properties"].get("endTime", None)
duration = run_entity["properties"].get("duration", None)
return Run(
name=run_entity["properties"]["runId"],
flow=Path(f"azureml://flows/{run_entity['properties']['experimentName']}"),
type=AZURE_RUN_TYPE_2_RUN_TYPE[run_entity["properties"]["runType"]],
created_on=date_parser.parse(run_entity["properties"]["creationContext"]["createdTime"]),
status=run_entity["annotations"]["status"],
display_name=run_entity["annotations"]["displayName"],
description=run_entity["annotations"]["description"],
tags=run_entity["annotations"]["tags"],
properties=run_entity["properties"]["userProperties"],
is_archived=run_entity["annotations"]["archived"],
run_source=RunInfoSources.INDEX_SERVICE,
metrics=run_entity["annotations"]["metrics"],
start_time=date_parser.parse(start_time) if start_time else None,
end_time=date_parser.parse(end_time) if end_time else None,
duration=duration,
creation_context=run_entity["properties"]["creationContext"],
experiment_name=run_entity["properties"]["experimentName"],
)
@classmethod
def _from_run_history_entity(cls, run_entity: dict) -> "Run":
"""Convert run entity from run history service to run object."""
# TODO(2887134): support cloud eager Run CRUD
flow_name = run_entity["properties"].get("azureml.promptflow.flow_name", None)
start_time = run_entity.get("startTimeUtc", None)
end_time = run_entity.get("endTimeUtc", None)
duration = run_entity.get("duration", None)
return Run(
name=run_entity["runId"],
flow=Path(f"azureml://flows/{flow_name}"),
type=AZURE_RUN_TYPE_2_RUN_TYPE[run_entity["runType"]],
created_on=date_parser.parse(run_entity["createdUtc"]),
start_time=date_parser.parse(start_time) if start_time else None,
end_time=date_parser.parse(end_time) if end_time else None,
duration=duration,
status=run_entity["status"],
display_name=run_entity["displayName"],
description=run_entity["description"],
tags=run_entity["tags"],
properties=run_entity["properties"],
is_archived=run_entity.get("archived", False), # TODO: Get archived status, depends on run history team
error=run_entity.get("error", None),
run_source=RunInfoSources.RUN_HISTORY,
portal_url=run_entity[RunDataKeys.PORTAL_URL],
creation_context=run_entity["createdBy"],
data=run_entity[RunDataKeys.DATA],
run=run_entity[RunDataKeys.RUN],
output=run_entity[RunDataKeys.OUTPUT],
)
@classmethod
def _from_mt_service_entity(cls, run_entity) -> "Run":
"""Convert run object from MT service to run object."""
flow_run_id = run_entity.flow_run_resource_id.split("/")[-1]
return cls(
name=flow_run_id,
flow=Path(f"azureml://flows/{run_entity.flow_name}"),
display_name=run_entity.flow_run_display_name,
description="",
tags=[],
created_on=date_parser.parse(run_entity.created_on),
status="",
run_source=RunInfoSources.MT_SERVICE,
)
def _to_orm_object(self) -> ORMRun:
"""Convert current run entity to ORM object."""
display_name = self._format_display_name()
return ORMRun(
type=self.type,
name=self.name,
created_on=self.created_on,
status=self.status,
start_time=self._start_time.isoformat() if self._start_time else None,
end_time=self._end_time.isoformat() if self._end_time else None,
display_name=display_name,
description=self.description,
tags=json.dumps(self.tags) if self.tags else None,
properties=json.dumps(self.properties),
data=Path(self.data).resolve().absolute().as_posix() if self.data else None,
run_source=self._run_source,
)
def _dump(self) -> None:
"""Dump current run entity to local DB."""
self._to_orm_object().dump()
def _to_dict(
self,
*,
exclude_additional_info: bool = False,
exclude_debug_info: bool = False,
exclude_properties: bool = False,
):
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
properties = self.properties
result = {
"name": self.name,
"created_on": self.created_on,
"status": self.status,
"display_name": self.display_name,
"description": self.description,
"tags": self.tags,
"properties": properties,
}
if self._run_source == RunInfoSources.LOCAL:
result["flow_name"] = self._flow_name
local_storage = LocalStorageOperations(run=self)
result[RunDataKeys.DATA] = (
local_storage._data_path.resolve().absolute().as_posix()
if local_storage._data_path is not None
else None
)
result[RunDataKeys.OUTPUT] = local_storage.outputs_folder.as_posix()
if self.run:
run_name = self.run.name if isinstance(self.run, Run) else self.run
result[RunDataKeys.RUN] = properties.pop(FlowRunProperties.RUN, run_name)
# add exception part if any
exception_dict = local_storage.load_exception()
if exception_dict:
if exclude_additional_info:
exception_dict.pop("additionalInfo", None)
if exclude_debug_info:
exception_dict.pop("debugInfo", None)
result["error"] = exception_dict
elif self._run_source == RunInfoSources.INDEX_SERVICE:
result["creation_context"] = self._creation_context
result["flow_name"] = self._experiment_name
result["is_archived"] = self._is_archived
result["start_time"] = self._start_time.isoformat() if self._start_time else None
result["end_time"] = self._end_time.isoformat() if self._end_time else None
result["duration"] = self._duration
elif self._run_source == RunInfoSources.RUN_HISTORY:
result["creation_context"] = self._creation_context
result["start_time"] = self._start_time.isoformat() if self._start_time else None
result["end_time"] = self._end_time.isoformat() if self._end_time else None
result["duration"] = self._duration
result[RunDataKeys.PORTAL_URL] = self._portal_url
result[RunDataKeys.DATA] = self.data
result[RunDataKeys.OUTPUT] = self._output
if self.run:
result[RunDataKeys.RUN] = self.run
if self._error:
result["error"] = self._error
if exclude_additional_info:
result["error"]["error"].pop("additionalInfo", None)
if exclude_debug_info:
result["error"]["error"].pop("debugInfo", None)
# hide properties when needed (e.g. list remote runs)
if exclude_properties is True:
result.pop("properties", None)
return result
@classmethod
def _load(
cls,
data: Optional[Dict] = None,
yaml_path: Optional[Union[PathLike, str]] = None,
params_override: Optional[list] = None,
**kwargs,
):
from marshmallow import INCLUDE
data = data or {}
params_override = params_override or []
context = {
BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path("./"),
PARAMS_OVERRIDE_KEY: params_override,
}
run = cls._load_from_dict(
data=data,
context=context,
additional_message="Failed to load flow run",
unknown=INCLUDE,
**kwargs,
)
if yaml_path:
run._source_path = yaml_path
return run
def _generate_run_name(self) -> str:
"""Generate a run name with flow_name_variant_timestamp format."""
try:
flow_name = self._get_flow_dir().name if not self._use_remote_flow else self._flow_name
variant = self.variant
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
variant = parse_variant(variant)[1] if variant else DEFAULT_VARIANT
run_name_prefix = f"{flow_name}_{variant}"
# TODO(2562996): limit run name length to avoid it becoming too long
run_name = f"{run_name_prefix}_{timestamp}"
return _sanitize_python_variable_name(run_name)
except Exception:
return str(uuid.uuid4())
def _get_default_display_name(self) -> str:
display_name = self.display_name or self.name
return display_name
def _format_display_name(self) -> str:
"""
Format display name. Replace macros in display name with actual values.
The following macros are supported: ${variant_id}, ${run}, ${timestamp}
For example,
if the display name is "run-${variant_id}-${timestamp}"
it will be formatted to "run-variant_1-20210901123456"
"""
display_name = self._get_default_display_name()
time_stamp = datetime.datetime.now().strftime("%Y%m%d%H%M")
if self.run:
display_name = display_name.replace(RUN_MACRO, self._validate_and_return_run_name(self.run))
display_name = display_name.replace(TIMESTAMP_MACRO, time_stamp)
variant = self.variant
variant = parse_variant(variant)[1] if variant else DEFAULT_VARIANT
display_name = display_name.replace(VARIANT_ID_MACRO, variant)
return display_name
def _get_flow_dir(self) -> Path:
if not self._use_remote_flow:
flow = Path(self.flow)
if flow.is_dir():
return flow
return flow.parent
raise UserErrorException("Cannot get flow directory for remote flow.")
@classmethod
def _get_schema_cls(self):
return RunSchema
def _to_rest_object(self):
from azure.ai.ml._utils._storage_utils import AzureMLDatastorePathUri
from promptflow.azure._restclient.flow.models import (
BatchDataInput,
RunDisplayNameGenerationType,
SessionSetupModeEnum,
SubmitBulkRunRequest,
)
if self.run is not None:
if isinstance(self.run, Run):
variant = self.run.name
elif isinstance(self.run, str):
variant = self.run
else:
raise UserErrorException(f"Invalid run type: {type(self.run)}")
else:
variant = None
if not variant and not self.data:
raise UserErrorException("Either run or data should be provided")
# parse inputs mapping
inputs_mapping = {}
if self.column_mapping and not isinstance(self.column_mapping, dict):
raise UserErrorException(f"column_mapping should be a dictionary, got {type(self.column_mapping)} instead.")
if self.column_mapping:
for k, v in self.column_mapping.items():
if isinstance(v, (int, float, str, bool)):
inputs_mapping[k] = v
else:
try:
val = json.dumps(v)
except Exception as e:
raise UserErrorException(
f"Invalid input mapping value: {v}, "
f"only primitive or json serializable value is supported, got {type(v)}",
error=e,
)
inputs_mapping[k] = val
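# Illustrative mapping result (values hypothetical): a column_mapping of
#   {"question": "${data.question}", "top_k": 3, "meta": {"a": 1}}
# is resolved to
#   {"question": "${data.question}", "top_k": 3, "meta": '{"a": 1}'}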
# parse resources
if self._resources is not None:
if not isinstance(self._resources, dict):
raise TypeError(f"resources should be a dict, got {type(self._resources)} for {self._resources}")
vm_size = self._resources.get("instance_type", None)
compute_name = self._resources.get("compute", None)
else:
vm_size = None
compute_name = None
# use functools.partial to avoid too many arguments that have the same values
common_submit_bulk_run_request = functools.partial(
SubmitBulkRunRequest,
run_id=self.name,
# use the user-provided display name since PFS has special logic to update it.
run_display_name=self._get_default_display_name(),
description=self.description,
tags=self.tags,
node_variant=self.variant,
variant_run_id=variant,
batch_data_input=BatchDataInput(
data_uri=self.data,
),
inputs_mapping=inputs_mapping,
run_experiment_name=self._experiment_name,
environment_variables=self.environment_variables,
connections=self.connections,
flow_lineage_id=self._lineage_id,
run_display_name_generation_type=RunDisplayNameGenerationType.USER_PROVIDED_MACRO,
vm_size=vm_size,
session_setup_mode=SessionSetupModeEnum.SYSTEM_WAIT,
compute_name=compute_name,
)
if str(self.flow).startswith(REMOTE_URI_PREFIX):
if not self._use_remote_flow:
# in the normal case, the local flow is uploaded to the datastore and self.flow is resolved to a remote uri
# upload via _check_and_upload_path
# submit with params FlowDefinitionDataStoreName and FlowDefinitionBlobPath
path_uri = AzureMLDatastorePathUri(str(self.flow))
return common_submit_bulk_run_request(
flow_definition_data_store_name=path_uri.datastore,
flow_definition_blob_path=path_uri.path,
)
else:
# if the flow is a remote flow from the beginning, submit with param flow_definition_resource_id,
# which will be resolved in the pfazure run create operation
# the flow resource id looks like: "azureml://locations/<region>/workspaces/<ws-name>/flows/<flow-name>"
if not isinstance(self.flow, str) or (
not self.flow.startswith(FLOW_RESOURCE_ID_PREFIX) and not self.flow.startswith(REGISTRY_URI_PREFIX)
):
raise UserErrorException(
f"Invalid flow value when transforming to rest object: {self.flow!r}. "
f"Expecting a flow definition resource id starts with '{FLOW_RESOURCE_ID_PREFIX}' "
f"or a flow registry uri starts with '{REGISTRY_URI_PREFIX}'"
)
return common_submit_bulk_run_request(
flow_definition_resource_id=self.flow,
)
else:
# upload via CodeOperations.create_or_update
# submit with param FlowDefinitionDataUri
return common_submit_bulk_run_request(
flow_definition_data_uri=str(self.flow),
)
def _check_run_status_is_completed(self) -> None:
if self.status != RunStatus.COMPLETED:
error_message = f"Run {self.name!r} is not completed, the status is {self.status!r}."
if self.status != RunStatus.FAILED:
error_message += " Please wait for its completion, or select other completed run(s)."
raise InvalidRunStatusError(error_message)
@staticmethod
def _validate_and_return_run_name(run: Union[str, "Run"]) -> str:
"""Check if run name is valid."""
if isinstance(run, Run):
return run.name
elif isinstance(run, str):
return run
raise InvalidRunError(f"Invalid run {run!r}, expected 'str' or 'Run' object but got {type(run)!r}.")
def _validate_for_run_create_operation(self):
"""Validate run object for create operation."""
# check flow value
if Path(self.flow).is_dir():
# local flow
pass
elif isinstance(self.flow, str) and self.flow.startswith(REMOTE_URI_PREFIX):
# remote flow
pass
else:
raise UserErrorException(
f"Invalid flow value: {self.flow!r}. Expecting a local flow folder path or a remote flow pattern "
f"like '{REMOTE_URI_PREFIX}<flow-name>'"
)
if is_remote_uri(self.data):
# Pass through ARM id or remote url; currently a malformed value only surfaces as an error at runtime.
pass
else:
if self.data and not Path(self.data).exists():
raise UserErrorException(f"data path {self.data} does not exist")
if not self.run and not self.data:
raise UserErrorException("at least one of data or run must be provided")
def _generate_output_path(self, config: Optional[Configuration]) -> Path:
config = config or Configuration.get_instance()
path = config.get_run_output_path()
if path is None:
path = Path.home() / PROMPT_FLOW_DIR_NAME / ".runs"
else:
try:
flow_posix_path = self.flow.resolve().as_posix()
path = Path(path.replace(FLOW_DIRECTORY_MACRO_IN_CONFIG, flow_posix_path)).resolve()
# in case user manually modifies ~/.promptflow/pf.yaml
# fall back to default run output path
if path.as_posix() == flow_posix_path:
raise Exception(f"{FLOW_DIRECTORY_MACRO_IN_CONFIG!r} is not a valid value.")
path.mkdir(parents=True, exist_ok=True)
except Exception: # pylint: disable=broad-except
path = Path.home() / PROMPT_FLOW_DIR_NAME / ".runs"
warning_message = (
"Got unexpected error when parsing specified output path: "
f"{config.get_run_output_path()!r}; "
f"will use default output path: {path!r} instead."
)
logger.warning(warning_message)
return (path / str(self.name)).resolve()
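# Illustrative example (assuming FLOW_DIRECTORY_MACRO_IN_CONFIG is "${flow_directory}"; paths hypothetical):
# with the run output path configured as "${flow_directory}/.runs" and a flow at /home/user/my_flow,
# the run folder resolves to /home/user/my_flow/.runs/<run-name>; an invalid macro value falls back
# to ~/.promptflow/.runs.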
@classmethod
def _load_from_source(cls, source: Union[str, Path], params_override: Optional[Dict] = None, **kwargs) -> "Run":
"""Load run from run record source folder."""
source = Path(source)
params_override = params_override or {}
run_metadata_file = source / DownloadedRun.RUN_METADATA_FILE_NAME
if not run_metadata_file.exists():
raise UserErrorException(
f"Invalid run source: {source!r}. Expecting a valid run source folder with {run_metadata_file!r}. "
f"Please make sure the run source is downloaded by 'pfazure run download' command."
)
# extract run info from source folder
with open(source / DownloadedRun.RUN_METADATA_FILE_NAME, encoding=DEFAULT_ENCODING) as f:
run_info = json.load(f)
return cls(
name=run_info["name"],
source=source,
run_source=RunInfoSources.EXISTING_RUN,
status=run_info["status"], # currently only support completed run
display_name=params_override.get("display_name", run_info.get("display_name", source.name)),
description=params_override.get("description", run_info.get("description", "")),
tags=params_override.get("tags", run_info.get("tags", {})),
created_on=datetime.datetime.fromisoformat(run_info["created_on"]),
start_time=datetime.datetime.fromisoformat(run_info["start_time"]),
end_time=datetime.datetime.fromisoformat(run_info["end_time"]),
**kwargs,
)
| promptflow/src/promptflow/promptflow/_sdk/entities/_run.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_run.py",
"repo_id": "promptflow",
"token_count": 14053
} | 14 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import json
import typing
from promptflow._constants import SpanAttributeFieldName, SpanFieldName
from promptflow._sdk._orm.trace import LineRun as ORMLineRun
from promptflow._sdk._orm.trace import Span as ORMSpan
from promptflow._sdk.entities._trace import LineRun, Span
class TraceOperations:
def list_spans(
self,
session_id: typing.Optional[str] = None,
) -> typing.List[Span]:
orm_spans = ORMSpan.list(
session_id=session_id,
)
return [Span._from_orm_object(orm_span) for orm_span in orm_spans]
def list_line_runs(
self,
session_id: typing.Optional[str] = None,
) -> typing.List[LineRun]:
line_runs = []
orm_spans_group_by_trace_id = ORMLineRun.list(session_id=session_id)
# merge spans with same `line_run_id` or `referenced.line_run_id` (if exists)
grouped_orm_spans = {}
for orm_spans in orm_spans_group_by_trace_id:
first_orm_span = orm_spans[0]
attributes = json.loads(first_orm_span.content)[SpanFieldName.ATTRIBUTES]
if (
SpanAttributeFieldName.LINE_RUN_ID not in attributes
and SpanAttributeFieldName.REFERENCED_LINE_RUN_ID not in attributes
):
# no `line_run_id` or `referenced.line_run_id` in attributes:
# a standard OpenTelemetry trace, regarded as one line run
grouped_orm_spans[first_orm_span.trace_id] = copy.deepcopy(orm_spans)
elif (
SpanAttributeFieldName.LINE_RUN_ID in attributes
and SpanAttributeFieldName.REFERENCED_LINE_RUN_ID not in attributes
):
# main flow trace
line_run_id = attributes[SpanAttributeFieldName.LINE_RUN_ID]
if line_run_id not in grouped_orm_spans:
grouped_orm_spans[line_run_id] = []
grouped_orm_spans[line_run_id].extend(copy.deepcopy(orm_spans))
elif (
SpanAttributeFieldName.LINE_RUN_ID in attributes
and SpanAttributeFieldName.REFERENCED_LINE_RUN_ID in attributes
):
# evaluation flow trace
referenced_line_run_id = attributes[SpanAttributeFieldName.REFERENCED_LINE_RUN_ID]
if referenced_line_run_id not in grouped_orm_spans:
grouped_orm_spans[referenced_line_run_id] = []
grouped_orm_spans[referenced_line_run_id].extend(copy.deepcopy(orm_spans))
else:
# aggregation node, ignore for now
pass
for orm_spans in grouped_orm_spans.values():
spans = [Span._from_orm_object(orm_span) for orm_span in orm_spans]
line_run = LineRun._from_spans(spans)
if line_run is not None:
line_runs.append(line_run)
return line_runs
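# Illustrative usage (assuming spans have been persisted to the local trace DB):
#   ops = TraceOperations()
#   spans = ops.list_spans(session_id="<session-id>")
#   line_runs = ops.list_line_runs(session_id="<session-id>")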
| promptflow/src/promptflow/promptflow/_sdk/operations/_trace_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/operations/_trace_operations.py",
"repo_id": "promptflow",
"token_count": 1438
} | 15 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import fields, is_dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Callable, Dict, List, Type, TypeVar
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow.contracts.tool import ConnectionType
T = TypeVar("T")
def get_type(obj: type):
if is_dataclass(obj):
return obj
if isinstance(obj, list):
return List[get_type(obj[0])]
if isinstance(obj, dict):
return Dict[str, get_type(obj[list(obj.keys())[0]])]
return obj
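# Illustrative behavior (element types inferred from the first element/value):
#   get_type([1, 2, 3])   -> List[int]
#   get_type({"a": "b"})  -> Dict[str, str]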
def deserialize_dataclass(cls: Type[T], data: dict) -> T:
if not is_dataclass(cls):
raise ValueError(f"{cls} is not a dataclass")
if not isinstance(data, dict):
raise ValueError(f"{data} is not a dict")
kwargs = {}
for field in fields(cls):
if field.name not in data:
kwargs[field.name] = field.default
continue
field_type = get_type(field.type)
kwargs[field.name] = deserialize_value(data[field.name], field_type)
return cls(**kwargs)
def deserialize_value(obj, field_type):
if not isinstance(field_type, type):
return obj
if is_dataclass(field_type):
return deserialize_dataclass(field_type, obj)
if issubclass(field_type, Enum):
return field_type(obj)
if issubclass(field_type, datetime) and obj is not None:
# Remove Z/z at the end of the string.
if obj.endswith("Z") or obj.endswith("z"):
return datetime.fromisoformat(obj[:-1])
return datetime.fromisoformat(obj)
return obj
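# Illustrative sketch (hypothetical dataclass, not part of this module):
#   @dataclass
#   class Sample:
#       created: datetime
#       count: int = 0
#   deserialize_dataclass(Sample, {"created": "2024-01-01T00:00:00Z"})
#   # -> Sample(created=datetime(2024, 1, 1, 0, 0), count=0); the trailing "Z" is stripped,
#   # and the missing "count" falls back to its field default.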
def serialize(value: object, remove_null: bool = False, serialization_funcs: Dict[type, Callable] = None) -> dict:
if serialization_funcs:
for cls, f in serialization_funcs.items():
if isinstance(value, cls):
return f(value)
if isinstance(value, datetime):
return value.isoformat() + "Z"
if isinstance(value, Enum):
return value.value
if isinstance(value, list):
return [serialize(v, remove_null, serialization_funcs) for v in value]
if isinstance(value, GeneratorProxy):
# TODO: The current implementation of the serialize function is not self-explanatory, as value.items is mutable
# whereas the serialize function should deal with a fixed object. We should rename the function
# to to_serializable to better reflect its purpose.
return value.items
# Note that custom connection check should before dict check
if ConnectionType.is_connection_value(value):
return ConnectionType.serialize_conn(value)
if isinstance(value, dict):
return {k: serialize(v, remove_null, serialization_funcs) for k, v in value.items()}
if is_dataclass(value):
if hasattr(value, "serialize"):
result = value.serialize()
else:
result = {
f.name: serialize(getattr(value, f.name), remove_null, serialization_funcs) for f in fields(value)
}
if not remove_null:
return result
null_keys = [k for k, v in result.items() if v is None]
for k in null_keys:
result.pop(k)
return result
try:
from pydantic import BaseModel
if isinstance(value, BaseModel): # Handle pydantic model, which is used in langchain
return value.dict()
except ImportError:
# Ignore ImportError if pydantic is not installed
pass
return value
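# Illustrative behavior (Status is a hypothetical Enum with value "Completed"):
#   serialize(datetime(2024, 1, 1))           -> "2024-01-01T00:00:00Z"
#   serialize([Status.COMPLETED])             -> ["Completed"]
#   serialize({"a": None}, remove_null=True)  -> {"a": None}  # remove_null only prunes dataclass fields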
def assertEqual(a: dict, b: dict, path: str = ""):
if isinstance(a, dict):
assert isinstance(b, dict), f"{path}: {type(a)} != {type(b)}"
assert set(a.keys()) == set(b.keys()), f"{path}: {set(a.keys())} != {set(b.keys())}"
for key in a.keys():
assertEqual(a[key], b[key], path + "." + key)
elif isinstance(a, list):
assert isinstance(b, list), f"{path}: {type(a)} != {type(b)}"
assert len(a) == len(b), f"{path}: {len(a)} != {len(b)}"
for i in range(len(a)):
assertEqual(a[i], b[i], path + f"[{i}]")
else:
assert a == b, f"{path}: {a} != {b}"
def convert_eager_flow_output_to_dict(value: Any):
"""
Convert the output of eager flow to a dict. Since the output of eager flow
may not be a dict, we need to convert it to a dict in batch mode.
Examples:
1. If the output is a dict, return it directly:
value = {"output": 1} -> {"output": 1}
2. If the output is a dataclass, convert it to a dict:
value = SampleDataClass(output=1) -> {"output": 1}
3. If the output is not a dict or dataclass, convert it to a dict by adding a key "output":
value = 1 -> {"output": 1}
"""
if isinstance(value, dict):
return value
elif is_dataclass(value):
return {f.name: getattr(value, f.name) for f in fields(value)}
else:
return {"output": value}
| promptflow/src/promptflow/promptflow/_utils/dataclass_serializer.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/dataclass_serializer.py",
"repo_id": "promptflow",
"token_count": 2062
} | 16 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import datetime
import json
import logging
from promptflow._constants import (
LAST_HINT_TIME,
LAST_CHECK_TIME,
PF_VERSION_CHECK,
CLI_PACKAGE_NAME,
HINT_INTERVAL_DAY,
GET_PYPI_INTERVAL_DAY,
LATEST_VERSION,
CURRENT_VERSION,
)
from promptflow._sdk._constants import HOME_PROMPT_FLOW_DIR
HINT_ACTIVITY_NAME = [
"pf.flows.test",
"pf.runs.create_or_update",
"pfazure.flows.create_or_update",
"pfazure.runs.create_or_update",
]
logger = logging.getLogger(__name__)
def get_cached_versions():
from promptflow._sdk._utils import read_write_by_user
(HOME_PROMPT_FLOW_DIR / PF_VERSION_CHECK).touch(mode=read_write_by_user(), exist_ok=True)
with open(HOME_PROMPT_FLOW_DIR / PF_VERSION_CHECK, "r") as f:
try:
cached_versions = json.load(f)
except json.decoder.JSONDecodeError:
cached_versions = {}
return cached_versions
def dump_cached_versions(cached_versions):
with open(HOME_PROMPT_FLOW_DIR / PF_VERSION_CHECK, "w") as f:
json.dump(cached_versions, f)
def get_latest_version_from_pypi(package_name):
pypi_url = f"https://pypi.org/pypi/{package_name}/json"
try:
import requests
response = requests.get(pypi_url, timeout=3)
if response.status_code == 200:
data = response.json()
latest_version = data["info"]["version"]
return latest_version
else:
return None
except Exception as ex: # pylint: disable=broad-except
logger.debug(f"Failed to get the latest version from '{pypi_url}'. {str(ex)}")
return None
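# Illustrative usage (requires network access; the result varies over time):
#   get_latest_version_from_pypi("promptflow")  # -> e.g. "1.4.0", or None on failure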
def check_latest_version():
"""Get the latest versions from a cached file"""
cached_versions = get_cached_versions()
last_check_time = (
datetime.datetime.strptime(cached_versions[LAST_CHECK_TIME], "%Y-%m-%d %H:%M:%S.%f")
if LAST_CHECK_TIME in cached_versions
else None
)
if last_check_time is None or (
datetime.datetime.now() > last_check_time + datetime.timedelta(days=GET_PYPI_INTERVAL_DAY)
):
version = get_latest_version_from_pypi(CLI_PACKAGE_NAME)
if version is not None:
cached_versions[LATEST_VERSION] = version
cached_versions[LAST_CHECK_TIME] = str(datetime.datetime.now())
dump_cached_versions(cached_versions)
def hint_for_update():
"""
Check if there is a new version of prompt flow available every 7 days. If yes, log debug info to hint
the customer to upgrade the package.
"""
cached_versions = get_cached_versions()
last_hint_time = (
datetime.datetime.strptime(cached_versions[LAST_HINT_TIME], "%Y-%m-%d %H:%M:%S.%f")
if LAST_HINT_TIME in cached_versions
else None
)
if last_hint_time is None or (
datetime.datetime.now() > last_hint_time + datetime.timedelta(days=HINT_INTERVAL_DAY)
):
from promptflow._sdk._utils import get_promptflow_sdk_version
cached_versions[CURRENT_VERSION] = get_promptflow_sdk_version()
if LATEST_VERSION in cached_versions:
from packaging.version import parse
if parse(cached_versions[CURRENT_VERSION]) < parse(cached_versions[LATEST_VERSION]):
cached_versions[LAST_HINT_TIME] = str(datetime.datetime.now())
message = (
f"New prompt flow version available: promptflow-{cached_versions[LATEST_VERSION]}. Running "
f"'pf upgrade' to update CLI."
)
logger.debug(message)
dump_cached_versions(cached_versions)
| promptflow/src/promptflow/promptflow/_utils/version_hint_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/version_hint_utils.py",
"repo_id": "promptflow",
"token_count": 1617
} | 17 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import AzureMachineLearningDesignerServiceClientConfiguration
from .operations import BulkRunsOperations, ConnectionOperations, ConnectionsOperations, FlowRuntimesOperations, FlowRuntimesWorkspaceIndependentOperations, FlowSessionsOperations, FlowsOperations, FlowsProviderOperations, ToolsOperations, TraceSessionsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.rest import HttpRequest, HttpResponse
class AzureMachineLearningDesignerServiceClient(object):
"""AzureMachineLearningDesignerServiceClient.
:ivar bulk_runs: BulkRunsOperations operations
:vartype bulk_runs: flow.operations.BulkRunsOperations
:ivar connection: ConnectionOperations operations
:vartype connection: flow.operations.ConnectionOperations
:ivar connections: ConnectionsOperations operations
:vartype connections: flow.operations.ConnectionsOperations
:ivar flow_runtimes: FlowRuntimesOperations operations
:vartype flow_runtimes: flow.operations.FlowRuntimesOperations
:ivar flow_runtimes_workspace_independent: FlowRuntimesWorkspaceIndependentOperations
operations
:vartype flow_runtimes_workspace_independent:
flow.operations.FlowRuntimesWorkspaceIndependentOperations
:ivar flows: FlowsOperations operations
:vartype flows: flow.operations.FlowsOperations
:ivar flow_sessions: FlowSessionsOperations operations
:vartype flow_sessions: flow.operations.FlowSessionsOperations
:ivar flows_provider: FlowsProviderOperations operations
:vartype flows_provider: flow.operations.FlowsProviderOperations
:ivar tools: ToolsOperations operations
:vartype tools: flow.operations.ToolsOperations
:ivar trace_sessions: TraceSessionsOperations operations
:vartype trace_sessions: flow.operations.TraceSessionsOperations
:param base_url: Service URL. Default value is ''.
:type base_url: str
:param api_version: Api Version. The default value is "1.0.0".
:type api_version: str
"""
def __init__(
self,
base_url="", # type: str
api_version="1.0.0", # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
self._config = AzureMachineLearningDesignerServiceClientConfiguration(api_version=api_version, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.bulk_runs = BulkRunsOperations(self._client, self._config, self._serialize, self._deserialize)
self.connection = ConnectionOperations(self._client, self._config, self._serialize, self._deserialize)
self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.flow_runtimes = FlowRuntimesOperations(self._client, self._config, self._serialize, self._deserialize)
self.flow_runtimes_workspace_independent = FlowRuntimesWorkspaceIndependentOperations(self._client, self._config, self._serialize, self._deserialize)
self.flows = FlowsOperations(self._client, self._config, self._serialize, self._deserialize)
self.flow_sessions = FlowSessionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.flows_provider = FlowsProviderOperations(self._client, self._config, self._serialize, self._deserialize)
self.tools = ToolsOperations(self._client, self._config, self._serialize, self._deserialize)
self.trace_sessions = TraceSessionsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AzureMachineLearningDesignerServiceClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/_azure_machine_learning_designer_service_client.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/_azure_machine_learning_designer_service_client.py",
"repo_id": "promptflow",
"token_count": 1997
} | 18 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.2, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._flow_sessions_operations import build_create_flow_session_request, build_delete_flow_session_request, build_get_flow_session_request, build_get_standby_pools_request, build_list_flow_session_pip_packages_request, build_poll_operation_status_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FlowSessionsOperations:
"""FlowSessionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_flow_session(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
session_id: str,
body: Optional["_models.CreateFlowSessionRequest"] = None,
**kwargs: Any
) -> Any:
"""create_flow_session.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param session_id:
:type session_id: str
:param body:
:type body: ~flow.models.CreateFlowSessionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateFlowSessionRequest')
else:
_json = None
request = build_create_flow_session_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
session_id=session_id,
content_type=content_type,
json=_json,
template_url=self.create_flow_session.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('object', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_flow_session.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/{sessionId}'} # type: ignore
@distributed_trace_async
async def get_flow_session(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
session_id: str,
**kwargs: Any
) -> "_models.GetTrainingSessionDto":
"""get_flow_session.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param session_id:
:type session_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GetTrainingSessionDto, or the result of cls(response)
:rtype: ~flow.models.GetTrainingSessionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GetTrainingSessionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_session_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
session_id=session_id,
template_url=self.get_flow_session.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('GetTrainingSessionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_session.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/{sessionId}'} # type: ignore
@distributed_trace_async
async def delete_flow_session(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
session_id: str,
**kwargs: Any
) -> Any:
"""delete_flow_session.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param session_id:
:type session_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_flow_session_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
session_id=session_id,
template_url=self.delete_flow_session.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('object', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_flow_session.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/{sessionId}'} # type: ignore
@distributed_trace_async
async def list_flow_session_pip_packages(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
session_id: str,
**kwargs: Any
) -> str:
"""list_flow_session_pip_packages.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param session_id:
:type session_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_flow_session_pip_packages_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
session_id=session_id,
template_url=self.list_flow_session_pip_packages.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_flow_session_pip_packages.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/{sessionId}/pipPackages'} # type: ignore
@distributed_trace_async
async def poll_operation_status(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
session_id: str,
action_type: Union[str, "_models.SetupFlowSessionAction"],
location: str,
operation_id: str,
api_version: Optional[str] = "1.0.0",
type: Optional[str] = None,
**kwargs: Any
) -> Any:
"""poll_operation_status.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param session_id:
:type session_id: str
:param action_type:
:type action_type: str or ~flow.models.SetupFlowSessionAction
:param location:
:type location: str
:param operation_id:
:type operation_id: str
:param api_version: Api Version. The default value is "1.0.0".
:type api_version: str
:param type:
:type type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_poll_operation_status_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
session_id=session_id,
action_type=action_type,
location=location,
operation_id=operation_id,
api_version=api_version,
type=type,
template_url=self.poll_operation_status.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('object', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
poll_operation_status.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/{sessionId}/{actionType}/locations/{location}/operations/{operationId}'} # type: ignore
@distributed_trace_async
async def get_standby_pools(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> List["_models.StandbyPoolProperties"]:
"""get_standby_pools.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of StandbyPoolProperties, or the result of cls(response)
:rtype: list[~flow.models.StandbyPoolProperties]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.StandbyPoolProperties"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_standby_pools_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.get_standby_pools.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[StandbyPoolProperties]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_standby_pools.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessions/standbypools'} # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flow_sessions_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flow_sessions_operations.py",
"repo_id": "promptflow",
"token_count": 7307
} | 19 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_create_flow_session_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
session_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
waitfor_completion = kwargs.pop('waitfor_completion', False) # type: Optional[bool]
accept = "text/plain, application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessionsAdmin/{sessionId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"sessionId": _SERIALIZER.url("session_id", session_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if waitfor_completion is not None:
query_parameters['waitforCompletion'] = _SERIALIZER.query("waitfor_completion", waitfor_completion, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class FlowSessionsAdminOperations(object):
"""FlowSessionsAdminOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create_flow_session(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
session_id, # type: str
waitfor_completion=False, # type: Optional[bool]
body=None, # type: Optional["_models.CreateFlowSessionRequest"]
**kwargs # type: Any
):
# type: (...) -> str
"""create_flow_session.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param session_id:
:type session_id: str
:param waitfor_completion:
:type waitfor_completion: bool
:param body:
:type body: ~flow.models.CreateFlowSessionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[str]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, "CreateFlowSessionRequest")
else:
_json = None
request = build_create_flow_session_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
session_id=session_id,
content_type=content_type,
json=_json,
waitfor_completion=waitfor_completion,
template_url=self.create_flow_session.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("str", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_flow_session.metadata = {"url": "/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessionsAdmin/{sessionId}"} # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flow_sessions_admin_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flow_sessions_admin_operations.py",
"repo_id": "promptflow",
"token_count": 2614
} | 20 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from enum import Enum
from typing import Any, Dict, Union
import requests
from azure.ai.ml._restclient.v2023_06_01_preview.models import WorkspaceConnectionPropertiesV2BasicResource
from azure.ai.ml._scope_dependent_operations import (
OperationConfig,
OperationsContainer,
OperationScope,
_ScopeDependentOperations,
)
from azure.core.exceptions import ClientAuthenticationError
from promptflow._sdk.entities._connection import CustomConnection, _Connection
from promptflow._utils.retry_utils import http_retry_wrapper
from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller
from promptflow.azure._utils.gerneral import get_arm_token
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
GET_CONNECTION_URL = (
"/subscriptions/{sub}/resourcegroups/{rg}/providers/Microsoft.MachineLearningServices"
"/workspaces/{ws}/connections/{name}/listsecrets?api-version=2023-04-01-preview"
)
LIST_CONNECTION_URL = (
"/subscriptions/{sub}/resourcegroups/{rg}/providers/Microsoft.MachineLearningServices"
"/workspaces/{ws}/connections?api-version=2023-04-01-preview"
)
FLOW_META_PREFIX = "azureml.flow."
class ConnectionCategory(str, Enum):
AzureOpenAI = "AzureOpenAI"
CognitiveSearch = "CognitiveSearch"
CognitiveService = "CognitiveService"
CustomKeys = "CustomKeys"
def get_case_insensitive_key(d, key, default=None):
for k, v in d.items():
if k.lower() == key.lower():
return v
return default
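# Illustrative behavior:
#   get_case_insensitive_key({"ApiVersion": "2023-07-01-preview"}, "apiversion")  # -> "2023-07-01-preview"
#   get_case_insensitive_key({}, "kind", "default")                               # -> "default"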
class ArmConnectionOperations(_ScopeDependentOperations):
"""ArmConnectionOperations.
Get connections from the ARM API. You should not instantiate this class directly. Instead, you should
create a PFClient instance that instantiates it for you and
attaches it as an attribute.
"""
def __init__(
self,
operation_scope: OperationScope,
operation_config: OperationConfig,
all_operations: OperationsContainer,
credential,
service_caller: FlowServiceCaller,
**kwargs: Dict,
):
super(ArmConnectionOperations, self).__init__(operation_scope, operation_config)
self._all_operations = all_operations
self._service_caller = service_caller
self._credential = credential
def get(self, name, **kwargs):
connection_dict = self.build_connection_dict(name)
return _Connection._from_execution_connection_dict(name=name, data=connection_dict)
@classmethod
def _direct_get(cls, name, subscription_id, resource_group_name, workspace_name, credential):
"""
This method is added for a local pf_client with workspace provider to ensure we only require limited
permission (workspace/list secrets), since creating an azure pf_client requires workspace read permission.
"""
connection_dict = cls._build_connection_dict(
name, subscription_id, resource_group_name, workspace_name, credential
)
return _Connection._from_execution_connection_dict(name=name, data=connection_dict)
@classmethod
def open_url(cls, token, url, action, host="management.azure.com", method="GET", model=None) -> Union[Any, dict]:
"""
:type token: str
:type url: str
:type action: str, for the error message format.
:type host: str
:type method: str
:type model: Type[msrest.serialization.Model]
"""
headers = {"Authorization": f"Bearer {token}"}
response = http_retry_wrapper(requests.request)(method, f"https://{host}{url}", headers=headers)
message_format = (
f"Open url {{url}} failed with status code: {response.status_code}, action: {action}, reason: {{reason}}"
)
if response.status_code == 403:
raise AccessDeniedError(operation=url, target=ErrorTarget.RUNTIME)
elif 400 <= response.status_code < 500:
raise OpenURLFailedUserError(
message_format=message_format,
url=url,
reason=response.reason,
)
elif response.status_code != 200:
raise OpenURLFailed(
message_format=message_format,
url=url,
reason=response.reason,
)
data = response.json()
if model:
return model.deserialize(data)
return data
@classmethod
def validate_and_fallback_connection_type(cls, name, type_name, category, metadata):
if type_name:
return type_name
if category == ConnectionCategory.AzureOpenAI:
return "AzureOpenAI"
if category == ConnectionCategory.CognitiveSearch:
return "CognitiveSearch"
if category == ConnectionCategory.CognitiveService:
kind = get_case_insensitive_key(metadata, "Kind")
if kind == "Content Safety":
return "AzureContentSafety"
if kind == "Form Recognizer":
return "FormRecognizer"
raise UnknownConnectionType(
message_format="Connection {name} is not recognized in PromptFlow, "
"please make sure the connection is created in PromptFlow.",
category=category,
name=name,
)
@classmethod
def build_connection_dict_from_rest_object(cls, name, obj) -> dict:
"""
:type name: str
:type obj: azure.ai.ml._restclient.v2023_06_01_preview.models.WorkspaceConnectionPropertiesV2BasicResource
"""
# Reference 1: https://msdata.visualstudio.com/Vienna/_git/vienna?path=/src/azureml-api/src/AccountRP/Contracts/WorkspaceConnection/WorkspaceConnectionDtoV2.cs&_a=blame&version=GBmaster # noqa: E501
# Reference 2: https://msdata.visualstudio.com/Vienna/_git/vienna?path=%2Fsrc%2Fazureml-api%2Fsrc%2FDesigner%2Fsrc%2FMiddleTier%2FMiddleTier%2FServices%2FPromptFlow%2FConnectionsManagement.cs&version=GBmaster&_a=contents # noqa: E501
# This connection type covers the generic ApiKey auth connection categories, for examples:
# AzureOpenAI:
# Category:= AzureOpenAI
# AuthType:= ApiKey (as type discriminator)
# Credentials:= {ApiKey} as <see cref="ApiKey"/>
# Target:= {ApiBase}
#
# CognitiveService:
# Category:= CognitiveService
# AuthType:= ApiKey (as type discriminator)
# Credentials:= {SubscriptionKey} as <see cref="ApiKey"/>
# Target:= ServiceRegion={serviceRegion}
#
# CognitiveSearch:
# Category:= CognitiveSearch
# AuthType:= ApiKey (as type discriminator)
# Credentials:= {Key} as <see cref="ApiKey"/>
# Target:= {Endpoint}
#
# Use Metadata property bag for ApiType, ApiVersion, Kind and other metadata fields
properties = obj.properties
type_name = get_case_insensitive_key(properties.metadata, f"{FLOW_META_PREFIX}connection_type")
type_name = cls.validate_and_fallback_connection_type(name, type_name, properties.category, properties.metadata)
module = get_case_insensitive_key(properties.metadata, f"{FLOW_META_PREFIX}module", "promptflow.connections")
# Note: Category is connectionType in MT, but type name should be class name, which is flowValueType in MT.
# Handle old connections here, see details: https://github.com/Azure/promptflow/tree/main/connections
type_name = f"{type_name}Connection" if not type_name.endswith("Connection") else type_name
meta = {"type": type_name, "module": module}
if properties.category == ConnectionCategory.AzureOpenAI:
value = {
"api_key": properties.credentials.key,
"api_base": properties.target,
"api_type": get_case_insensitive_key(properties.metadata, "ApiType"),
"api_version": get_case_insensitive_key(properties.metadata, "ApiVersion"),
}
            # Note: Resource id is required in some cloud scenarios; it is not exposed on the sdk/cli entity.
resource_id = get_case_insensitive_key(properties.metadata, "ResourceId")
if resource_id:
value["resource_id"] = resource_id
elif properties.category == ConnectionCategory.CognitiveSearch:
value = {
"api_key": properties.credentials.key,
"api_base": properties.target,
"api_version": get_case_insensitive_key(properties.metadata, "ApiVersion"),
}
elif properties.category == ConnectionCategory.CognitiveService:
value = {
"api_key": properties.credentials.key,
"endpoint": properties.target,
"api_version": get_case_insensitive_key(properties.metadata, "ApiVersion"),
}
elif properties.category == ConnectionCategory.CustomKeys:
# Merge secrets from credentials.keys and other string fields from metadata
value = {
**properties.credentials.keys,
**{k: v for k, v in properties.metadata.items() if not k.startswith(FLOW_META_PREFIX)},
}
if type_name == CustomConnection.__name__:
meta["secret_keys"] = list(properties.credentials.keys.keys())
else:
raise UnknownConnectionType(
message_format=(
"Unknown connection {name} category {category}, "
"please upgrade your promptflow sdk version and retry."
),
category=properties.category,
name=name,
)
        # Note: Filter out empty values so that default values can be picked up when initializing the class object.
return {**meta, "value": {k: v for k, v in value.items() if v}}
def build_connection_dict(self, name):
return self._build_connection_dict(
name,
self._operation_scope.subscription_id,
self._operation_scope.resource_group_name,
self._operation_scope.workspace_name,
self._credential,
)
@classmethod
def _convert_to_connection_dict(cls, conn_name, conn_data):
try:
rest_obj = WorkspaceConnectionPropertiesV2BasicResource.deserialize(conn_data)
conn_dict = cls.build_connection_dict_from_rest_object(conn_name, rest_obj)
return conn_dict
except Exception as e:
raise BuildConnectionError(
message_format=f"Build connection dict for connection {{name}} failed with {e}.",
name=conn_name,
)
@classmethod
def _build_connection_dict(cls, name, subscription_id, resource_group_name, workspace_name, credential) -> dict:
"""
:type name: str
:type subscription_id: str
:type resource_group_name: str
:type workspace_name: str
:type credential: azure.identity.TokenCredential
"""
url = GET_CONNECTION_URL.format(
sub=subscription_id,
rg=resource_group_name,
ws=workspace_name,
name=name,
)
try:
rest_obj: WorkspaceConnectionPropertiesV2BasicResource = cls.open_url(
get_arm_token(credential=credential),
url=url,
action="listsecrets",
method="POST",
model=WorkspaceConnectionPropertiesV2BasicResource,
)
except AccessDeniedError:
auth_error_message = (
"Access denied to list workspace secret due to invalid authentication. "
"Please ensure you have gain RBAC role 'Azure Machine Learning Workspace Connection Secrets Reader' "
"for current workspace, and wait for a few minutes to make sure the new role takes effect. "
)
raise OpenURLUserAuthenticationError(message=auth_error_message)
except ClientAuthenticationError as e:
raise UserErrorException(target=ErrorTarget.CONTROL_PLANE_SDK, message=str(e), error=e)
except Exception as e:
raise SystemErrorException(target=ErrorTarget.CONTROL_PLANE_SDK, message=str(e), error=e)
try:
return cls.build_connection_dict_from_rest_object(name, rest_obj)
except Exception as e:
raise BuildConnectionError(
message_format=f"Build connection dict for connection {{name}} failed with {e}.",
name=name,
)
class AccessDeniedError(UserErrorException):
"""Exception raised when run info can not be found in storage"""
def __init__(self, operation: str, target: ErrorTarget):
super().__init__(message=f"Access is denied to perform operation {operation!r}", target=target)
class OpenURLFailed(SystemErrorException):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
class BuildConnectionError(SystemErrorException):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
class UserAuthenticationError(UserErrorException):
"""Exception raised when user authentication failed"""
pass
class OpenURLUserAuthenticationError(UserAuthenticationError):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
class OpenURLFailedUserError(UserErrorException):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
class UnknownConnectionType(UserErrorException):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
| promptflow/src/promptflow/promptflow/azure/operations/_arm_connection_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/operations/_arm_connection_operations.py",
"repo_id": "promptflow",
"token_count": 5724
} | 21 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
from typing import Any, List, Mapping, Optional, Tuple
from promptflow._core._errors import UnexpectedError
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._utils.logger_utils import bulk_logger
from promptflow.batch._base_executor_proxy import AbstractExecutorProxy
from promptflow.contracts.run_mode import RunMode
from promptflow.executor import FlowExecutor
from promptflow.executor._line_execution_process_pool import LineExecutionProcessPool
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.executor._script_executor import ScriptExecutor
from promptflow.storage._run_storage import AbstractRunStorage
class PythonExecutorProxy(AbstractExecutorProxy):
def __init__(self, flow_executor: FlowExecutor):
self._flow_executor = flow_executor
@classmethod
async def create(
cls,
flow_file: Path,
working_dir: Optional[Path] = None,
*,
connections: Optional[dict] = None,
storage: Optional[AbstractRunStorage] = None,
**kwargs,
) -> "PythonExecutorProxy":
flow_executor = FlowExecutor.create(flow_file, connections, working_dir, storage=storage, raise_ex=False)
return cls(flow_executor)
async def exec_aggregation_async(
self,
batch_inputs: Mapping[str, Any],
aggregation_inputs: Mapping[str, Any],
run_id: Optional[str] = None,
) -> AggregationResult:
with self._flow_executor._run_tracker.node_log_manager:
return self._flow_executor._exec_aggregation(batch_inputs, aggregation_inputs, run_id=run_id)
def _exec_batch(
self,
batch_inputs: List[Mapping[str, Any]],
output_dir: Path,
run_id: Optional[str] = None,
batch_timeout_sec: Optional[int] = None,
line_timeout_sec: Optional[int] = None,
worker_count: Optional[int] = None,
) -> Tuple[List[LineResult], bool]:
# TODO: Refine the logic here since the script executor actually doesn't have the 'node' concept
if isinstance(self._flow_executor, ScriptExecutor):
run_tracker = RunTracker(self._flow_executor._storage)
else:
run_tracker = self._flow_executor._run_tracker
with run_tracker.node_log_manager:
OperationContext.get_instance().run_mode = RunMode.Batch.name
if self._flow_executor._flow_file is None:
raise UnexpectedError(
"Unexpected error occurred while init FlowExecutor. Error details: flow file is missing."
)
if batch_timeout_sec:
bulk_logger.info(f"The timeout for the batch run is {batch_timeout_sec} seconds.")
with LineExecutionProcessPool(
self._flow_executor,
len(batch_inputs),
run_id,
output_dir,
batch_timeout_sec=batch_timeout_sec,
line_timeout_sec=line_timeout_sec,
worker_count=worker_count,
) as pool:
line_number = [batch_input["line_number"] for batch_input in batch_inputs]
line_results = pool.run(zip(line_number, batch_inputs))
# For bulk run, currently we need to add line results to run_tracker
self._flow_executor._add_line_results(line_results, run_tracker)
return line_results, pool.is_timeout
def get_inputs_definition(self):
return self._flow_executor.get_inputs_definition()
@classmethod
def _get_tool_metadata(cls, flow_file: Path, working_dir: Path) -> dict:
from promptflow._sdk._utils import generate_flow_tools_json
return generate_flow_tools_json(
flow_directory=working_dir,
dump=False,
used_packages_only=True,
)
| promptflow/src/promptflow/promptflow/batch/_python_executor_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/batch/_python_executor_proxy.py",
"repo_id": "promptflow",
"token_count": 1635
} | 22 |
import os
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from typing import Callable, Dict, Optional
from promptflow.contracts.flow import InputAssignment, Node, ToolSource
from promptflow.contracts.tool import ToolType
from promptflow.exceptions import ErrorTarget
from promptflow.executor._docstring_parser import DocstringParser
from promptflow.executor._errors import UnsupportedAssistantToolType
from promptflow.executor._tool_resolver import ToolResolver
@dataclass
class AssistantTool:
name: str
openai_definition: dict
func: Callable
class AssistantToolInvoker:
def __init__(self, working_dir: Optional[Path] = None):
self._working_dir = working_dir or Path(os.getcwd())
self._assistant_tools: Dict[str, AssistantTool] = {}
@classmethod
def init(cls, tools: list, working_dir: Optional[Path] = None):
invoker = cls(working_dir=working_dir)
invoker._load_tools(tools)
return invoker
def _load_tools(self, tools: list):
for tool in tools:
if tool["type"] in ("code_interpreter", "retrieval"):
self._assistant_tools[tool["type"]] = AssistantTool(
name=tool["type"], openai_definition=tool, func=None
)
elif tool["type"] == "function":
function_tool = self._load_tool_as_function(tool)
self._assistant_tools[function_tool.name] = function_tool
else:
raise UnsupportedAssistantToolType(
message_format="Unsupported assistant tool type: {tool_type}",
tool_type=tool["type"],
target=ErrorTarget.EXECUTOR,
)
def _load_tool_as_function(self, tool: dict):
tool_resolver = ToolResolver(self._working_dir)
node, predefined_inputs = self._generate_node_for_tool(tool)
resolved_tool = tool_resolver.resolve_tool_by_node(node, convert_input_types=False)
func_name = resolved_tool.definition.function
definition = self._generate_tool_definition(func_name, resolved_tool.definition.description, predefined_inputs)
if resolved_tool.node.inputs:
inputs = {name: value.value for name, value in resolved_tool.node.inputs.items()}
func = partial(resolved_tool.callable, **inputs)
else:
func = resolved_tool.callable
return AssistantTool(name=func_name, openai_definition=definition, func=func)
def _generate_node_for_tool(self, tool: dict):
predefined_inputs = {}
for input_name, value in tool.get("predefined_inputs", {}).items():
predefined_inputs[input_name] = InputAssignment.deserialize(value)
node = Node(
name="assistant_node",
tool="assistant_tool",
inputs=predefined_inputs,
source=ToolSource.deserialize(tool["source"]) if "source" in tool else None,
type=ToolType.PYTHON if "tool_type" in tool and tool["tool_type"] == "python" else None,
)
return node, list(predefined_inputs.keys())
def invoke_tool(self, func_name, kwargs):
return self._assistant_tools[func_name].func(**kwargs)
def to_openai_tools(self):
return [tool.openai_definition for tool in self._assistant_tools.values()]
def _generate_tool_definition(self, func_name: str, description: str, predefined_inputs: list) -> dict:
to_openai_type = {
"str": "string",
"int": "number",
"float": "number",
"bool": "boolean",
"list": "array",
"dict": "object",
}
description, params = DocstringParser.parse(description)
for input in predefined_inputs:
if input in params:
params.pop(input)
        for param in params.values():
            param["type"] = to_openai_type.get(param["type"], param["type"])
return {
"type": "function",
"function": {
"name": func_name,
"description": description,
"parameters": {"type": "object", "properties": params, "required": list(params.keys())},
},
}
| promptflow/src/promptflow/promptflow/executor/_assistant_tool_invoker.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_assistant_tool_invoker.py",
"repo_id": "promptflow",
"token_count": 1833
} | 23 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from promptflow.executor._service.apis.common import router as common_router
from promptflow.executor._service.apis.execution import router as execution_router
from promptflow.executor._service.utils.service_utils import generate_error_response
app = FastAPI()
app.include_router(common_router)
app.include_router(execution_router)
@app.exception_handler(Exception)
async def exception_handler(request, exc):
resp = generate_error_response(exc)
return JSONResponse(status_code=int(resp.response_code), content=resp.to_dict())
| promptflow/src/promptflow/promptflow/executor/_service/app.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_service/app.py",
"repo_id": "promptflow",
"token_count": 206
} | 24 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
from dataclasses import asdict, dataclass
from datetime import datetime
from promptflow._utils.dataclass_serializer import serialize
from promptflow.contracts.run_info import FlowRunInfo, RunInfo
@dataclass
class NodeRunRecord:
"""Dataclass for storing the run record of each node during single line execution on the flow
:param str node_name: The name of the node
:param int line_number: The line number in the source file
:param str run_info: The information about the run
:param datetime start_time: The time the node started running
:param datetime end_time: The time the node finished running
:param str status: The status of the node run
"""
node_name: str
line_number: int
run_info: str
start_time: datetime
end_time: datetime
status: str
@staticmethod
def from_run_info(run_info: RunInfo) -> "NodeRunRecord":
"""Create a NodeRunRecord from a RunInfo object.
:param RunInfo run_info: The run info to create the NodeRunRecord from
:return: The created NodeRunRecord
:rtype: NodeRunRecord
"""
return NodeRunRecord(
node_name=run_info.node,
line_number=run_info.index,
run_info=serialize(run_info),
start_time=run_info.start_time.isoformat(),
end_time=run_info.end_time.isoformat(),
status=run_info.status.value,
)
def serialize(self) -> str:
"""Serialize the NodeRunRecord for storage in blob.
:return: The serialized result
:rtype: str
"""
return json.dumps(asdict(self))
@dataclass
class LineRunRecord:
"""A dataclass for storing the run record of a single line execution on the flow.
:param int line_number: The line number in the record
:param str run_info: The information about the line run
:param datetime start_time: The time the line started executing
:param datetime end_time: The time the line finished executing
:param str name: The name of the line run
:param str description: The description of the line run
:param str status: The status of the line execution
:param str tags: The tags associated with the line run
"""
line_number: int
run_info: str
start_time: datetime
end_time: datetime
name: str
description: str
status: str
tags: str
@staticmethod
def from_run_info(run_info: FlowRunInfo) -> "LineRunRecord":
"""Create a LineRunRecord from a FlowRunInfo object.
:param FlowRunInfo run_info: The run info to create the LineRunRecord from
:return: The created LineRunRecord
:rtype: LineRunRecord
"""
return LineRunRecord(
line_number=run_info.index,
run_info=serialize(run_info),
start_time=run_info.start_time.isoformat(),
end_time=run_info.end_time.isoformat(),
name=run_info.name,
description=run_info.description,
status=run_info.status.value,
tags=run_info.tags,
)
def serialize(self) -> str:
"""Serialize the LineRunRecord for storage in a blob.
:return: The serialized result
:rtype: str
"""
return json.dumps(asdict(self))
| promptflow/src/promptflow/promptflow/storage/run_records.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/storage/run_records.py",
"repo_id": "promptflow",
"token_count": 1297
} | 25 |
# Microsoft Open Source Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
Resources:
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [[email protected]](mailto:[email protected]) with questions or concerns
| promptflow/CODE_OF_CONDUCT.md/0 | {
"file_path": "promptflow/CODE_OF_CONDUCT.md",
"repo_id": "promptflow",
"token_count": 115
} | 0 |
In prompt flow, you can utilize connections to securely manage credentials or secrets for external services.
# Connections
Connections are for storing information about how to access external services like LLMs: endpoints, API keys, etc.
- In your local development environment, the connections are persisted in your local machine with keys encrypted.
- In Azure AI, connections can be configured to be shared across the entire workspace. Secrets associated with connections are securely persisted in the corresponding Azure Key Vault, adhering to robust security and compliance standards.
Prompt flow provides a variety of pre-built connections, including Azure Open AI, Open AI, etc. These pre-built connections enable seamless integration with these resources within the built-in tools. Additionally, you have the flexibility to create custom connection types using key-value pairs, so you can tailor connections to your specific requirements, particularly in Python tools.
| Connection type | Built-in tools |
| ------------------------------------------------------------ | ------------------------------- |
| [Azure Open AI](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service) | LLM or Python |
| [Open AI](https://openai.com/) | LLM or Python |
| [Cognitive Search](https://azure.microsoft.com/en-us/products/search) | Vector DB Lookup or Python |
| [Serp](https://serpapi.com/) | Serp API or Python |
| Custom | Python |
By leveraging connections in prompt flow, you can easily establish and manage connections to external APIs and data sources, facilitating efficient data exchange and interaction within your AI applications.
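As an illustration, a custom connection can be consumed directly in a Python tool. The sketch below is minimal and assumes the field names `api_key` and `endpoint`, which are placeholders for whatever key-value pairs you define on your connection.
```python
from promptflow import tool
from promptflow.connections import CustomConnection
@tool
def call_my_service(connection: CustomConnection, query: str) -> str:
    # Key-value pairs defined on the connection are exposed as attributes;
    # values marked as secrets are decrypted at runtime.
    api_key = connection.api_key    # assumed secret defined on the connection
    endpoint = connection.endpoint  # assumed plain configuration value
    # ... call the external service with endpoint and api_key ...
    return f"calling {endpoint} with query: {query}"
```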
## Next steps
- [Create connections](../how-to-guides/manage-connections.md) | promptflow/docs/concepts/concept-connections.md/0 | {
"file_path": "promptflow/docs/concepts/concept-connections.md",
"repo_id": "promptflow",
"token_count": 641
} | 1 |
# Develop evaluation flow
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental).
:::
The evaluation flow is a flow to test/evaluate the quality of your LLM application (standard/chat flow). It usually runs on the outputs of a standard/chat flow and computes key metrics that can be used to determine whether the standard/chat flow performs well. See [Flows](../../concepts/concept-flows.md) for more information.
Before proceeding with this document, it is important to have a good understanding of the standard flow. Please make sure you have read [Develop standard flow](./develop-standard-flow.md), since they share many common features and these features won't be repeated in this doc, such as:
- `Inputs/Outputs definition`
- `Nodes`
- `Chain nodes in a flow`
While the evaluation flow shares similarities with the standard flow, there are some important differences that set it apart. The main distinctions are as follows:
- `Inputs from an existing run`: The evaluation flow contains inputs that are derived from the outputs of the standard/chat flow. These inputs are used for evaluation purposes.
- `Aggregation node`: The evaluation flow contains one or more aggregation nodes, where the actual evaluation takes place. These nodes are responsible for computing metrics and determining the performance of the standard/chat flow.
## Evaluation flow example
In this guide, we use [eval-classification-accuracy](https://github.com/microsoft/promptflow/tree/main/examples/flows/evaluation/eval-classification-accuracy) flow as an example of the evaluation flow. This flow illustrates how to evaluate the performance of a classification flow: it compares each prediction to the groundtruth, assigns a `Correct` or `Incorrect` grade, and aggregates the results to produce metrics such as `accuracy`, which reflects how good the system is at classifying the data.
## Flow inputs
The flow `eval-classification-accuracy` contains two inputs:
```yaml
inputs:
groundtruth:
type: string
    description: Groundtruth of the original question; the correct label that you hope your standard flow can predict.
default: APP
prediction:
type: string
description: The actual predicted outputs that your flow produces.
default: APP
```
As evident from the inputs description, the evaluation flow requires two specific inputs:
- `groundtruth`: This input represents the actual or expected values against which the performance of the standard/chat flow will be evaluated.
- `prediction`: The prediction input is derived from the outputs of another standard/chat flow. It contains the predicted values generated by the standard/chat flow, which will be compared to the groundtruth values during the evaluation process.
From the definition perspective, there is no difference compared with adding an input/output in a `standard/chat flow`. However, when running an evaluation flow, you may need to source its inputs from both a data file and another flow run's outputs. For more details please refer to [Run and evaluate a flow](../run-and-evaluate-a-flow/index.md#evaluate-your-flow).
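For instance, a typical submission maps `groundtruth` to a column of the data file and `prediction` to an output of the referenced run (a sketch; the run and path names are placeholders):
```bash
pf run create --flow <path-to-eval-flow> --data <path-to-data-file> --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run <referenced-run-name>
```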
## Aggregation node
Before introducing the aggregation node, let's see what a regular node looks like. Take the node `grade` in the example flow as an instance:
```yaml
- name: grade
type: python
source:
type: code
path: grade.py
inputs:
groundtruth: ${inputs.groundtruth}
prediction: ${inputs.prediction}
```
It takes both `groundtruth` and `prediction` from the flow inputs and compares them in the source code to see if they match:
```python
from promptflow import tool
@tool
def grade(groundtruth: str, prediction: str):
return "Correct" if groundtruth.lower() == prediction.lower() else "Incorrect"
```
When it comes to an `aggregation node`, there are two key distinctions that set it apart from a regular node:
1. It has an attribute `aggregation` set to be `true`.
```yaml
- name: calculate_accuracy
type: python
source:
type: code
path: calculate_accuracy.py
inputs:
grades: ${grade.output}
aggregation: true # Add this attribute to make it an aggregation node
```
2. Its source code accepts a `List` type parameter which is a collection of the previous regular node's outputs.
```python
from typing import List
from promptflow import log_metric, tool
@tool
def calculate_accuracy(grades: List[str]):
result = []
for index in range(len(grades)):
grade = grades[index]
result.append(grade)
# calculate accuracy for each variant
accuracy = round((result.count("Correct") / len(result)), 2)
log_metric("accuracy", accuracy)
return result
```
The parameter `grades` in the above function contains all results produced by the regular node `grade`. Assuming the referred standard flow run has 3 outputs:
```json
{"prediction": "App"}
{"prediction": "Channel"}
{"prediction": "Academic"}
```
And we provide a data file like this:
```json
{"groundtruth": "App"}
{"groundtruth": "Channel"}
{"groundtruth": "Wiki"}
```
Then the `grades` value would be `["Correct", "Correct", "Incorrect"]`, and the final accuracy is `0.67`.
This example provides a straightforward demonstration of how to evaluate the classification flow. Once you have a solid understanding of the evaluation mechanism, you can customize and design your own evaluation method to suit your specific needs.
### More about the list parameter
What if the number of referred standard flow run outputs does not match the provided data file? We know that a standard flow can be executed against multiple lines of data, and some lines could fail while others succeed. Consider the same standard flow run mentioned in the above example, but assume the `2nd` line run failed, leaving the run outputs below:
```json
{"prediction": "App"}
{"prediction": "Academic"}
```
The prompt flow executor recognizes the line index of the referred run's outputs and extracts the corresponding rows from the provided data file. This means that even if the same data file is provided (3 lines), only the rows below will be processed:
```json
{"groundtruth": "App"}
{"groundtruth": "Wiki"}
```
In this case, the `grades` value would be `["Correct", "Incorrect"]` and the accuracy is `0.5`.
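To make the alignment concrete, here is a simplified sketch of the matching behavior (illustrative only, not the actual executor implementation):
```python
run_outputs = [
    {"line_number": 0, "prediction": "App"},
    {"line_number": 2, "prediction": "Academic"},  # line 1 failed and is skipped
]
data = [
    {"groundtruth": "App"},
    {"groundtruth": "Channel"},
    {"groundtruth": "Wiki"},
]
# Only the data rows whose index matches a successful output line are evaluated.
pairs = [(data[o["line_number"]]["groundtruth"], o["prediction"]) for o in run_outputs]
grades = ["Correct" if g.lower() == p.lower() else "Incorrect" for g, p in pairs]
print(grades)  # ['Correct', 'Incorrect'] -> accuracy 0.5
```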
### How to set aggregation node in VS Code Extension
![img](../../media/how-to-guides/develop-evaluation-flow/set_aggregation_node_in_vscode.png)
## How to log metrics
:::{admonition} Limitation
You can only log metrics in an `aggregation node`, otherwise the metric will be ignored.
:::
Promptflow supports logging and tracking experiments using `log_metric` function. A metric is a key-value pair that records a single float measure. In a python node, you can log a metric with below code:
```python
from typing import List
from promptflow import log_metric, tool
@tool
def example_log_metrics(grades: List[str]):
# this node is an aggregation node so it accepts a list of grades
metric_key = "accuracy"
    metric_value = round((grades.count("Correct") / len(grades)), 2)
log_metric(metric_key, metric_value)
```
After the run is completed, you can run `pf run show-metrics -n <run_name>` to see the metrics.
![img](../../media/how-to-guides/run_show_metrics.png)
| promptflow/docs/how-to-guides/develop-a-flow/develop-evaluation-flow.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-flow/develop-evaluation-flow.md",
"repo_id": "promptflow",
"token_count": 1939
} | 2 |
# How-to Guides
Simple and short articles grouped by topic, each introducing a core feature of prompt flow and how you can use it to address your specific use cases.
```{toctree}
:maxdepth: 1
develop-a-flow/index
init-and-test-a-flow
add-conditional-control-to-a-flow
run-and-evaluate-a-flow/index
tune-prompts-with-variants
execute-flow-as-a-function
deploy-a-flow/index
enable-streaming-mode
manage-connections
manage-runs
set-global-configs
develop-a-tool/index
process-image-in-flow
faq
```
| promptflow/docs/how-to-guides/index.md/0 | {
"file_path": "promptflow/docs/how-to-guides/index.md",
"repo_id": "promptflow",
"token_count": 174
} | 3 |
# pf
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../how-to-guides/faq.md#stable-vs-experimental).
:::
Manage prompt flow resources with the prompt flow CLI.
| Command | Description |
|---------------------------------|---------------------------------|
| [pf flow](#pf-flow) | Manage flows. |
| [pf connection](#pf-connection) | Manage connections. |
| [pf run](#pf-run) | Manage runs. |
| [pf tool](#pf-tool) | Init or list tools. |
| [pf config](#pf-config) | Manage config for current user. |
| [pf upgrade](#pf-upgrade) | Upgrade prompt flow CLI. |
## pf flow
Manage prompt flow flows.
| Command | Description |
| --- | --- |
| [pf flow init](#pf-flow-init) | Initialize a prompt flow directory. |
| [pf flow test](#pf-flow-test) | Test the prompt flow or flow node. |
| [pf flow validate](#pf-flow-validate) | Validate a flow and generate `flow.tools.json` for it. |
| [pf flow build](#pf-flow-build) | Build a flow for further sharing or deployment. |
| [pf flow serve](#pf-flow-serve) | Serve a flow as an endpoint. |
### pf flow init
Initialize a prompt flow directory.
```bash
pf flow init [--flow]
[--entry]
[--function]
[--prompt-template]
[--type]
[--yes]
```
#### Examples
Create a flow folder with code, prompts and YAML specification of the flow.
```bash
pf flow init --flow <path-to-flow-directory>
```
Create an evaluation prompt flow
```bash
pf flow init --flow <path-to-flow-directory> --type evaluation
```
Create a flow in existing folder
```bash
pf flow init --flow <path-to-existing-folder> --entry <entry.py> --function <function-name> --prompt-template <path-to-prompt-template.md>
```
#### Optional Parameters
`--flow`
The flow name to create.
`--entry`
The entry file name.
`--function`
The function name in entry file.
`--prompt-template`
The prompt template parameter and assignment.
`--type`
The initialized flow type.
accepted value: standard, evaluation, chat
`--yes --assume-yes -y`
Automatic yes to all prompts; assume 'yes' as answer to all prompts and run non-interactively.
### pf flow test
Test the prompt flow or flow node.
```bash
pf flow test --flow
[--inputs]
[--node]
[--variant]
[--debug]
[--interactive]
[--verbose]
```
#### Examples
Test the flow.
```bash
pf flow test --flow <path-to-flow-directory>
```
Test the flow with inputs specified inline.
```bash
pf flow test --flow <path-to-flow-directory> --inputs data_key1=data_val1 data_key2=data_val2
```
Test the flow with specified variant node.
```bash
pf flow test --flow <path-to-flow-directory> --variant '${node_name.variant_name}'
```
Test the single node in the flow.
```bash
pf flow test --flow <path-to-flow-directory> --node <node_name>
```
Debug the single node in the flow.
```bash
pf flow test --flow <path-to-flow-directory> --node <node_name> --debug
```
Chat in the flow.
```bash
pf flow test --flow <path-to-flow-directory> --node <node_name> --interactive
```
#### Required Parameter
`--flow`
The flow directory to test.
#### Optional Parameters
`--inputs`
Input data for the flow. Example: --inputs data1=data1_val data2=data2_val
`--node`
The name of the node in the flow to be tested.
`--variant`
Node & variant name in format of ${node_name.variant_name}.
`--debug`
Debug the single node in the flow.
`--interactive`
Start an interactive chat session for a chat flow.
`--verbose`
Displays the output for each step in the chat flow.
### pf flow validate
Validate the prompt flow and generate a `flow.tools.json` under `.promptflow`. This file is required when using a flow as a component in an Azure ML pipeline.
```bash
pf flow validate --source
[--debug]
[--verbose]
```
#### Examples
Validate the flow.
```bash
pf flow validate --source <path-to-flow>
```
#### Required Parameter
`--source`
The flow source to validate.
### pf flow build
Build a flow for further sharing or deployment.
```bash
pf flow build --source
--output
--format
[--variant]
[--verbose]
[--debug]
```
#### Examples
Build a flow as docker, which can be built into Docker image via `docker build`.
```bash
pf flow build --source <path-to-flow> --output <output-path> --format docker
```
Build a flow as docker with specific variant.
```bash
pf flow build --source <path-to-flow> --output <output-path> --format docker --variant '${node_name.variant_name}'
```
#### Required Parameter
`--source`
The flow or run source to be used.
`--output`
The folder to output the built flow. It needs to be empty or not exist.
`--format`
The format to build the flow into.
#### Optional Parameters
`--variant`
Node & variant name in format of ${node_name.variant_name}.
`--verbose`
Show more details for each step during build.
`--debug`
Show debug information during build.
### pf flow serve
Serve a flow as an endpoint.
```bash
pf flow serve --source
[--port]
[--host]
[--environment-variables]
[--verbose]
[--debug]
[--skip-open-browser]
```
#### Examples
Serve flow as an endpoint.
```bash
pf flow serve --source <path-to-flow>
```
Serve flow as an endpoint with specific port and host.
```bash
pf flow serve --source <path-to-flow> --port <port> --host <host> --environment-variables key1="`${my_connection.api_key}`" key2="value2"
```
#### Required Parameter
`--source`
The flow or run source to be used.
#### Optional Parameters
`--port`
The port on which endpoint to run.
`--host`
The host of endpoint.
`--environment-variables`
Environment variables to set by specifying a property path and value. Example: --environment-variables key1="\`${my_connection.api_key}\`" key2="value2". References to connection keys will be resolved to the actual values, and all environment variables specified will be set into `os.environ`.
`--verbose`
Show more details for each step during serve.
`--debug`
Show debug information during serve.
`--skip-open-browser`
Skip opening the browser after serving. This is a flag that takes no value.
## pf connection
Manage prompt flow connections.
| Command | Description |
| --- | --- |
| [pf connection create](#pf-connection-create) | Create a connection. |
| [pf connection update](#pf-connection-update) | Update a connection. |
| [pf connection show](#pf-connection-show) | Show details of a connection. |
| [pf connection list](#pf-connection-list) | List all the connection. |
| [pf connection delete](#pf-connection-delete) | Delete a connection. |
### pf connection create
Create a connection.
```bash
pf connection create --file
[--name]
[--set]
```
#### Examples
Create a connection with YAML file.
```bash
pf connection create -f <yaml-filename>
```
Create a connection with YAML file with override.
```bash
pf connection create -f <yaml-filename> --set api_key="<api-key>"
```
Create a custom connection with .env file; note that overrides specified by `--set` will be ignored.
```bash
pf connection create -f .env --name <name>
```
#### Required Parameter
`--file -f`
Local path to the YAML file containing the prompt flow connection specification.
#### Optional Parameters
`--name -n`
Name of the connection.
`--set`
Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>.
### pf connection update
Update a connection.
```bash
pf connection update --name
[--set]
```
#### Example
Update a connection.
```bash
pf connection update -n <name> --set api_key="<api-key>"
```
#### Required Parameter
`--name -n`
Name of the connection.
#### Optional Parameter
`--set`
Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>.
### pf connection show
Show details of a connection.
```bash
pf connection show --name
```
#### Required Parameter
`--name -n`
Name of the connection.
### pf connection list
List all the connection.
```bash
pf connection list
```
### pf connection delete
Delete a connection.
```bash
pf connection delete --name
```
#### Required Parameter
`--name -n`
Name of the connection.
## pf run
Manage prompt flow runs.
| Command | Description |
| --- | --- |
| [pf run create](#pf-run-create) | Create a run. |
| [pf run update](#pf-run-update) | Update a run metadata, including display name, description and tags. |
| [pf run stream](#pf-run-stream) | Stream run logs to the console. |
| [pf run list](#pf-run-list) | List runs. |
| [pf run show](#pf-run-show) | Show details for a run. |
| [pf run show-details](#pf-run-show-details) | Preview a run's input(s) and output(s). |
| [pf run show-metrics](#pf-run-show-metrics) | Print run metrics to the console. |
| [pf run visualize](#pf-run-visualize) | Visualize a run. |
| [pf run archive](#pf-run-archive) | Archive a run. |
| [pf run restore](#pf-run-restore) | Restore an archived run. |
### pf run create
Create a run.
```bash
pf run create [--file]
[--flow]
[--data]
[--column-mapping]
[--run]
[--variant]
[--stream]
[--environment-variables]
[--connections]
[--set]
[--source]
```
#### Examples
Create a run with YAML file.
```bash
pf run create -f <yaml-filename>
```
Create a run with YAML file and replace another data in the YAML file.
```bash
pf run create -f <yaml-filename> --data <path-to-new-data-file-relative-to-yaml-file>
```
Create a run from flow directory and reference a run.
```bash
pf run create --flow <path-to-flow-directory> --data <path-to-data-file> --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run <run-name> --variant '${summarize_text_content.variant_0}' --stream
```
Create a run from an existing run record folder.
```bash
pf run create --source <path-to-run-folder>
```
#### Optional Parameters
`--file -f`
Local path to the YAML file containing the prompt flow run specification; can be overwritten by other parameters. Reference [here](https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json) for YAML schema.
`--flow`
Local path to the flow directory. If --file is provided, this path should be relative to the file.
`--data`
Local path to the data file. If --file is provided, this path should be relative to the file.
`--column-mapping`
Inputs column mapping: use `${data.xx}` to refer to data columns, `${run.inputs.xx}` to refer to the referenced run's input columns, and `${run.outputs.xx}` to refer to the referenced run's output columns.
`--run`
Referenced flow run name. For example, you can run an evaluation flow against an existing run: "pf run create --flow evaluation_flow_dir --run existing_bulk_run".
`--variant`
Node & variant name in format of `${node_name.variant_name}`.
`--stream -s`
Indicates whether to stream the run's logs to the console.
default value: False
`--environment-variables`
Environment variables to set by specifying a property path and value. Example:
`--environment-variables key1='${my_connection.api_key}' key2='value2'`. References
to connection keys will be resolved to the actual values, and all environment variables
specified will be set into os.environ.
`--connections`
Overwrite node level connections with provided value.
Example: `--connections node1.connection=test_llm_connection node1.deployment_name=gpt-35-turbo`
`--set`
Update an object by specifying a property path and value to set.
Example: `--set property1.property2=<value>`.
`--source`
Local path to the existing run record folder.
### pf run update
Update a run metadata, including display name, description and tags.
```bash
pf run update --name
[--set]
```
#### Example
Update a run
```bash
pf run update -n <name> --set display_name="<display-name>" description="<description>" tags.key="value"
```
#### Required Parameter
`--name -n`
Name of the run.
#### Optional Parameter
`--set`
Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>.
### pf run stream
Stream run logs to the console.
```bash
pf run stream --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run list
List runs.
```bash
pf run list [--all-results]
[--archived-only]
[--include-archived]
[--max-results]
```
#### Optional Parameters
`--all-results`
Returns all results.
default value: False
`--archived-only`
List archived runs only.
default value: False
`--include-archived`
List archived runs and active runs.
default value: False
`--max-results -r`
Max number of results to return. Default is 50.
default value: 50
### pf run show
Show details for a run.
```bash
pf run show --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run show-details
Preview a run's input(s) and output(s).
```bash
pf run show-details --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run show-metrics
Print run metrics to the console.
```bash
pf run show-metrics --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run visualize
Visualize a run in the browser.
```bash
pf run visualize --names
```
#### Required Parameter
`--names -n`
Name of the runs, comma separated.
### pf run archive
Archive a run.
```bash
pf run archive --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run restore
Restore an archived run.
```bash
pf run restore --name
```
#### Required Parameter
`--name -n`
Name of the run.
## pf tool
Manage promptflow tools.
| Command | Description |
| --- | --- |
| [pf tool init](#pf-tool-init) | Initialize a tool directory. |
| [pf tool list](#pf-tool-list) | List all tools in the environment. |
| [pf tool validate](#pf-tool-validate) | Validate tools. |
### pf tool init
Initialize a tool directory.
```bash
pf tool init [--package]
[--tool]
[--set]
```
#### Examples
Creating a package tool from scratch.
```bash
pf tool init --package <package-name> --tool <tool-name>
```
Creating a package tool with extra info.
```bash
pf tool init --package <package-name> --tool <tool-name> --set icon=<icon-path> category=<tool-category> tags="{'<key>': '<value>'}"
```
Creating a python tool from scratch.
```bash
pf tool init --tool <tool-name>
```
#### Optional Parameters
`--package`
The package name to create.
`--tool`
The tool name to create.
`--set`
Set extra information about the tool, like category, icon and tags. Example: --set <key>=<value>.
### pf tool list
List all tools in the environment.
```bash
pf tool list [--flow]
```
#### Examples
List all package tool in the environment.
```bash
pf tool list
```
List all package tool and code tool in the flow.
```bash
pf tool list --flow <path-to-flow-directory>
```
#### Optional Parameters
`--flow`
The flow directory.
### pf tool validate
Validate tool.
```bash
pf tool validate --source
```
#### Examples
Validate single function tool.
```bash
pf tool validate --source <package-name>.<module-name>.<tool-function>
```
Validate all tool in a package tool.
```bash
pf tool validate --source <package-name>
```
Validate tools in a python script.
```bash
pf tool validate --source <path-to-tool-script>
```
#### Required Parameter
`--source`
The tool source to be used.
## pf config
Manage config for current user.
| Command | Description |
|-----------------------------------|--------------------------------------------|
| [pf config set](#pf-config-set) | Set prompt flow configs for current user. |
| [pf config show](#pf-config-show) | Show prompt flow configs for current user. |
### pf config set
Set prompt flow configs for the current user; configs will be stored at ~/.promptflow/pf.yaml.
```bash
pf config set
```
#### Examples
Configure the connection provider to an Azure ML workspace for the current user.
```bash
pf config set connection.provider="azureml://subscriptions/<your-subscription>/resourceGroups/<your-resourcegroup>/providers/Microsoft.MachineLearningServices/workspaces/<your-workspace>"
```
### pf config show
Show prompt flow configs for current user.
```bash
pf config show
```
#### Examples
Show prompt flow configs for the current user.
```bash
pf config show
```
## pf upgrade
Upgrade prompt flow CLI.
| Command | Description |
|-----------------------------|-----------------------------|
| [pf upgrade](#pf-upgrade) | Upgrade prompt flow CLI. |
### Examples
Upgrade prompt flow without confirmation prompts and run non-interactively.
```bash
pf upgrade --yes
``` | promptflow/docs/reference/pf-command-reference.md/0 | {
"file_path": "promptflow/docs/reference/pf-command-reference.md",
"repo_id": "promptflow",
"token_count": 6270
} | 4 |
[flake8]
extend-ignore = E203, E266, W503, F403, F821
max-line-length = 120
enable-extensions = E123,E133,E241,E242,E704,W505
exclude =
.git
.tox
.eggs
__pycache__
tests/fixtures/*
docs/*
venv,.pytest_cache
build
src/promptflow/promptflow/azure/_restclient
src/promptflow/tests/test_configs/*
import-order-style = google
[mypy]
ignore_missing_imports = True
disallow_untyped_defs = True
[mypy-pytest,pytest_mock]
ignore_missing_imports = True
[tool:pycln]
quiet = True
[black]
line_length = 120
[pycln]
silence = True
[isort]
# we use check for make fmt*
profile = "black"
# no need to fmt ignored
skip_gitignore = true
# needs to be the same as in black
line_length = 120
use_parentheses = true
include_trailing_comma = true
honor_noqa = true
ensure_newline_before_comments = true
skip_glob = [
docs/**,
pipelines/**,
pytest/**,
samples/**,
]
known_third_party = azure,mock,numpy,pandas,pydash,pytest,pytest_mock,requests,setuptools,six,sklearn,tqdm,urllib3,utilities,utils,yaml,jsonschema,strictyaml,jwt,pathspec,isodate,docker
known_first_party = promptflow,promptflow_test
| promptflow/setup.cfg/0 | {
"file_path": "promptflow/setup.cfg",
"repo_id": "promptflow",
"token_count": 494
} | 5 |
from enum import Enum
try:
from openai import OpenAI as OpenAIClient
except Exception:
raise Exception(
"Please upgrade your OpenAI package to version 1.0.0 or later using the command: pip install --upgrade openai.")
from promptflow.tools.common import render_jinja_template, handle_openai_error, \
parse_chat, to_bool, validate_functions, process_function_call, \
post_process_chat_api_response, normalize_connection_config
# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import ToolProvider, tool, register_apis
from promptflow.connections import OpenAIConnection
from promptflow.contracts.types import PromptTemplate
class Engine(str, Enum):
TEXT_DAVINCI_001 = "text-davinci-001"
TEXT_DAVINCI_002 = "text-davinci-002"
TEXT_DAVINCI_003 = "text-davinci-003"
TEXT_CURIE_001 = "text-curie-001"
TEXT_BABBAGE_001 = "text-babbage-001"
TEXT_ADA_001 = "text-ada-001"
CODE_CUSHMAN_001 = "code-cushman-001"
CODE_DAVINCI_002 = "code-davinci-002"
class OpenAI(ToolProvider):
def __init__(self, connection: OpenAIConnection):
super().__init__()
self._connection_dict = normalize_connection_config(connection)
self._client = OpenAIClient(**self._connection_dict)
@tool
@handle_openai_error()
def completion(
self,
prompt: PromptTemplate,
model: Engine = Engine.TEXT_DAVINCI_003,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs,
) -> str:
prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
# TODO: remove below type conversion after client can pass json rather than string.
echo = to_bool(echo)
stream = to_bool(stream)
response = self._client.completions.create(
prompt=prompt,
model=model.value if isinstance(model, Enum) else model,
# empty string suffix should be treated as None.
suffix=suffix if suffix else None,
max_tokens=int(max_tokens),
temperature=float(temperature),
top_p=float(top_p),
n=int(n),
stream=stream,
logprobs=int(logprobs) if logprobs else None,
echo=echo,
stop=stop if stop else None,
presence_penalty=float(presence_penalty),
frequency_penalty=float(frequency_penalty),
best_of=int(best_of),
# Logit bias must be a dict if we passed it to openai api.
logit_bias=logit_bias if logit_bias else {},
user=user
)
if stream:
def generator():
for chunk in response:
if chunk.choices:
yield getattr(chunk.choices[0], "text", "")
            # We must return the generator object instead of using yield directly here.
            # Otherwise, the function itself would become a generator, regardless of whether stream is True or False.
return generator()
else:
            # Get the first element because the prompt is a single string.
return response.choices[0].text
@tool
@handle_openai_error()
def chat(
self,
prompt: PromptTemplate,
model: str = "gpt-3.5-turbo",
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
# function_call can be of type str or dict.
function_call: object = None,
functions: list = None,
response_format: object = None,
**kwargs
) -> [str, dict]:
chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
messages = parse_chat(chat_str)
# TODO: remove below type conversion after client can pass json rather than string.
stream = to_bool(stream)
params = {
"model": model,
"messages": messages,
"temperature": float(temperature),
"top_p": float(top_p),
"n": int(n),
"stream": stream,
"stop": stop if stop else None,
"max_tokens": int(max_tokens) if max_tokens is not None and str(max_tokens).lower() != "inf" else None,
"presence_penalty": float(presence_penalty),
"frequency_penalty": float(frequency_penalty),
"logit_bias": logit_bias,
"user": user,
"response_format": response_format
}
if functions is not None:
validate_functions(functions)
params["functions"] = functions
params["function_call"] = process_function_call(function_call)
completion = self._client.chat.completions.create(**params)
return post_process_chat_api_response(completion, stream, functions)
register_apis(OpenAI)
@tool
def completion(
connection: OpenAIConnection,
prompt: PromptTemplate,
model: Engine = Engine.TEXT_DAVINCI_003,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1,
n: int = 1,
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs
) -> [str, dict]:
return OpenAI(connection).completion(
prompt=prompt,
model=model,
suffix=suffix,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
logprobs=logprobs,
echo=echo,
stop=stop if stop else None,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
best_of=best_of,
logit_bias=logit_bias,
user=user,
**kwargs,
)
@tool
def chat(
connection: OpenAIConnection,
prompt: PromptTemplate,
model: str = "gpt-3.5-turbo",
temperature: float = 1,
top_p: float = 1,
n: int = 1,
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
function_call: object = None,
functions: list = None,
response_format: object = None,
**kwargs
) -> [str, dict]:
return OpenAI(connection).chat(
prompt=prompt,
model=model,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
stop=stop if stop else None,
max_tokens=max_tokens,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
user=user,
function_call=function_call,
functions=functions,
response_format=response_format,
**kwargs,
)
| promptflow/src/promptflow-tools/promptflow/tools/openai.py/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/openai.py",
"repo_id": "promptflow",
"token_count": 3479
} | 6 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import json
import re
import shutil
from pathlib import Path
from promptflow._cli._params import add_param_set_tool_extra_info, base_params
from promptflow._cli._pf._init_entry_generators import (
InitGenerator,
SetupGenerator,
ToolPackageGenerator,
ToolPackageUtilsGenerator,
ToolReadmeGenerator,
)
from promptflow._cli._utils import activate_action, exception_handler, list_of_dict_to_dict
from promptflow._sdk._constants import DEFAULT_ENCODING
from promptflow._sdk._pf_client import PFClient
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import UserErrorException
logger = get_cli_sdk_logger()
def add_tool_parser(subparsers):
"""Add flow parser to the pf subparsers."""
tool_parser = subparsers.add_parser(
"tool",
description="Manage tools for promptflow.",
help="pf tool",
)
subparsers = tool_parser.add_subparsers()
add_parser_init_tool(subparsers)
add_parser_list_tool(subparsers)
add_parser_validate_tool(subparsers)
tool_parser.set_defaults(action="tool")
def add_parser_init_tool(subparsers):
"""Add tool init parser to the pf tool subparsers."""
epilog = """
Examples:
# Creating a package tool from scratch:
pf tool init --package package_tool --tool tool_name
# Creating a package tool with extra info:
pf tool init --package package_tool --tool tool_name --set icon=<icon-path> category=<category>
# Creating a python tool from scratch:
pf tool init --tool tool_name
""" # noqa: E501
add_param_package = lambda parser: parser.add_argument( # noqa: E731
"--package", type=str, help="The package name to create."
)
add_param_tool = lambda parser: parser.add_argument( # noqa: E731
"--tool", type=str, required=True, help="The tool name to create."
)
add_params = [
add_param_package,
add_param_tool,
add_param_set_tool_extra_info,
] + base_params
return activate_action(
name="init",
description="Creating a tool.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Initialize a tool directory.",
action_param_name="sub_action",
)
def add_parser_list_tool(subparsers):
"""Add tool list parser to the pf tool subparsers."""
epilog = """
Examples:
# List all package tool in the environment:
pf tool list
# List all package tool and code tool in the flow:
pf tool list --flow flow-path
""" # noqa: E501
add_param_flow = lambda parser: parser.add_argument("--flow", type=str, help="the flow directory") # noqa: E731
add_params = [
add_param_flow,
] + base_params
return activate_action(
name="list",
description="List tools.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="List all tools in the environment.",
action_param_name="sub_action",
)
def add_parser_validate_tool(subparsers):
"""Add tool list parser to the pf tool subparsers."""
epilog = """
Examples:
# Validate single function tool:
    pf tool validate --source <package_name>.<module_name>.<tool_function>
# Validate all tool in a package tool:
    pf tool validate --source <package_name>
# Validate tools in a python script:
pf tool validate --source <path_to_tool_script>
""" # noqa: E501
def add_param_source(parser):
parser.add_argument("--source", type=str, help="The tool source to be used.", required=True)
return activate_action(
name="validate",
description="Validate tool.",
epilog=epilog,
add_params=[
add_param_source,
],
subparsers=subparsers,
help_message="Validate tool. Will raise error if it is not valid.",
action_param_name="sub_action",
)
def dispatch_tool_commands(args: argparse.Namespace):
if args.sub_action == "init":
init_tool(args)
elif args.sub_action == "list":
list_tool(args)
elif args.sub_action == "validate":
validate_tool(args)
@exception_handler("Tool init")
def init_tool(args):
# Validate package/tool name
pattern = r"^[a-zA-Z_][a-zA-Z0-9_]*$"
if args.package and not re.match(pattern, args.package):
raise UserErrorException(f"The package name {args.package} is a invalid identifier.")
if not re.match(pattern, args.tool):
raise UserErrorException(f"The tool name {args.tool} is a invalid identifier.")
print("Creating tool from scratch...")
extra_info = list_of_dict_to_dict(args.extra_info)
icon_path = extra_info.pop("icon", None)
if icon_path and not Path(icon_path).exists():
raise UserErrorException(f"Cannot find the icon path {icon_path}.")
if args.package:
package_path = Path(args.package)
package_name = package_path.stem
script_code_path = package_path / package_name
script_code_path.mkdir(parents=True, exist_ok=True)
# Generate manifest file
manifest_file = package_path / "MANIFEST.in"
manifest_file.touch(exist_ok=True)
with open(manifest_file, "r") as f:
manifest_contents = [line.strip() for line in f.readlines()]
if icon_path:
package_icon_path = package_path / "icons"
package_icon_path.mkdir(exist_ok=True)
dst = shutil.copy2(icon_path, package_icon_path)
icon_path = f'Path(__file__).parent.parent / "icons" / "{Path(dst).name}"'
icon_manifest = f"include {package_name}/icons"
if icon_manifest not in manifest_contents:
manifest_contents.append(icon_manifest)
with open(manifest_file, "w", encoding=DEFAULT_ENCODING) as f:
f.writelines("\n".join(set(manifest_contents)))
# Generate package setup.py
SetupGenerator(package_name=package_name, tool_name=args.tool).generate_to_file(package_path / "setup.py")
# Generate utils.py to list meta data of tools.
ToolPackageUtilsGenerator(package_name=package_name).generate_to_file(script_code_path / "utils.py")
ToolReadmeGenerator(package_name=package_name, tool_name=args.tool).generate_to_file(package_path / "README.md")
else:
script_code_path = Path(".")
if icon_path:
icon_path = f'"{Path(icon_path).as_posix()}"'
# Generate tool script
ToolPackageGenerator(tool_name=args.tool, icon=icon_path, extra_info=extra_info).generate_to_file(
script_code_path / f"{args.tool}.py"
)
InitGenerator().generate_to_file(script_code_path / "__init__.py")
print(f'Done. Created the tool "{args.tool}" in {script_code_path.resolve()}.')
@exception_handler("Tool list")
def list_tool(args):
pf_client = PFClient()
package_tools = pf_client._tools.list(args.flow)
print(json.dumps(package_tools, indent=4))
@exception_handler("Tool validate")
def validate_tool(args):
import importlib
pf_client = PFClient()
try:
__import__(args.source)
source = importlib.import_module(args.source)
logger.debug(f"The source {args.source} is used as a package to validate.")
except ImportError:
try:
module_name, func_name = args.source.rsplit(".", 1)
module = importlib.import_module(module_name)
source = getattr(module, func_name)
logger.debug(f"The source {args.source} is used as a function to validate.")
except Exception:
if not Path(args.source).exists():
raise UserErrorException("Invalid source to validate tools.")
logger.debug(f"The source {args.source} is used as a script to validate.")
source = args.source
validation_result = pf_client._tools.validate(source)
print(repr(validation_result))
if not validation_result.passed:
exit(1)
| promptflow/src/promptflow/promptflow/_cli/_pf/_tool.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf/_tool.py",
"repo_id": "promptflow",
"token_count": 3192
} | 7 |