promptflow_repo/promptflow/scripts/distributing/configs/distribution_settings.json
{ "releases":{ "internal":{ "promptflow-tools-release":{ "index": "internal-index-release" }, "promptflow-tools-test":{ "index": "internal-index-test" } } }, "targets": { "internal-index-release": { "storage_account": "azuremlsdktestpypi", "packages_container": "repo", "index_container": "wheels", "blob_prefix": "promptflow", "endpoint": "azuremlsdktestpypi.azureedge.net" }, "internal-index-test": { "storage_account": "azuremlsdktestpypi", "packages_container": "repo", "index_container": "wheels", "blob_prefix": "test-promptflow", "endpoint": "azuremlsdktestpypi.azureedge.net" } } }
promptflow_repo/promptflow/scripts/distributing/configs/promptflow-tools-release-env.yaml
name: release-env
channels:
  - defaults
  - conda-forge
dependencies:
  - python=3.8
  - pip
  - pip:
      - setuptools
      - twine==4.0.0
      - azure-storage-blob==12.16.0
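Assuming standard conda usage (the CI pipeline that consumes this file is not shown here), the environment would be created with: conda env create -f configs/promptflow-tools-release-env.yaml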
promptflow_repo/promptflow/scripts/check_enforcer/check_enforcer.py
# Enforce the check of pipelines.
# This script gets the diff of the current branch against the main branch and calculates the pipelines
# that should be triggered. Then it checks whether the triggered pipelines are successful.
# This script loops for 30*loop-times seconds at most.
# How many checks are triggered:
# 1. sdk checks: sdk_cli_tests, sdk_cli_azure_test, sdk_cli_global_config_tests are triggered.
# 2. examples checks: this script calculates the path filters and decides what should be triggered.
# Trigger checks and return the status of the checks:
# 1. If examples are not correctly generated, fail.
# 2. If required pipelines are not triggered within 6 rounds of loops, fail.
# 2.1 (The special_care global variable can help pipelines that need to bypass the check.)
# Check whether pipelines succeed:
# 1. These pipelines should return a status within loop-times rounds.
# 2. If any of the triggered pipelines failed, fail.

# Import necessary libraries
import os
import fnmatch
import subprocess
import time
import argparse
import json
import sys

# Define variables
github_repository = "microsoft/promptflow"
snippet_debug = os.getenv("SNIPPET_DEBUG", 0)
merge_commit = ""
loop_times = 30
github_workspace = os.path.expanduser("~/promptflow/")

# Special cases for pipelines that need to be triggered more or less than the default value 1.
# If 0, the pipeline will be ignored by the check enforcer.
# Please note that the key should be the Job Name in the pipeline.
special_care = {
    "sdk_cli_tests": 4,
    "sdk_cli_azure_test": 4,
    # "samples_connections_connection": 0,
}

# Copied from the original yaml pipelines
checks = {
    "sdk_cli_tests": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-sdk-cli-test.yml",
    ],
    "sdk_cli_global_config_tests": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-global-config-test.yml",
    ],
    "sdk_cli_azure_test": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-sdk-cli-azure-test.yml",
    ],
}

reverse_checks = {}
pipelines = {}
pipelines_count = {}
failed_reason = ""


# Define functions
def trigger_checks(valid_status_array):
    global failed_reason
    global github_repository
    global merge_commit
    global snippet_debug
    global pipelines
    global pipelines_count

    output = subprocess.check_output(
        f"gh api /repos/{github_repository}/commits/{merge_commit}/check-suites?per_page=100",
        shell=True,
    )
    check_suites = json.loads(output)["check_suites"]
    for suite in check_suites:
        if snippet_debug != 0:
            print(f"check-suites id {suite['id']}")
        suite_id = suite["id"]
        output = subprocess.check_output(
            f"gh api /repos/{github_repository}/check-suites/{suite_id}/check-runs?per_page=100",
            shell=True,
        )
        check_runs = json.loads(output)["check_runs"]
        for run in check_runs:
            if snippet_debug != 0:
                print(f"check runs name {run['name']}")
            for key in pipelines.keys():
                value = pipelines[key]
                if value == 0:
                    continue
                if key in run["name"]:
                    pipelines_count[key] += 1
                    valid_status_array.append(run)

    for key in pipelines.keys():
        if pipelines_count[key] < pipelines[key]:
            failed_reason = "Not all pipelines are triggered."


def status_checks(valid_status_array):
    global failed_reason
    global pipelines
    global pipelines_count

    # Basic fact of sdk cli checked pipelines.
    failed_reason = ""

    # Loop through each valid status.
    for status in valid_status_array:
        # The pipeline was successful; nothing to do.
        if status["conclusion"] and status["conclusion"].lower() == "success":
            pass
        # The pipeline failed.
        elif status["conclusion"] and status["conclusion"].lower() == "failure":
            failed_reason = "Required pipelines are not successful."
        # Otherwise the pipeline is still running.
        else:
            if failed_reason == "":
                failed_reason = "Required pipelines are not finished."
            # Print the status of the pipeline to the console.
            print(status["name"] + " is checking.")


def trigger_prepare(input_paths):
    global github_workspace
    global checks
    global reverse_checks
    global pipelines
    global pipelines_count
    global failed_reason
    global special_care

    for input_path in input_paths:
        if "samples_connections_connection" in checks:
            continue
        # Check if the input path contains "examples" or "samples".
        if "examples" in input_path or "samples" in input_path:
            sys.path.append(os.path.expanduser(github_workspace + "/scripts/readme"))
            from readme import main as readme_main

            os.chdir(os.path.expanduser(github_workspace))

            # Get the list of pipelines from the readme file.
            pipelines_samples = readme_main(check=True)
            git_diff_files = [
                item
                for item in subprocess.check_output(
                    ["git", "diff", "--name-only", "HEAD"]
                )
                .decode("utf-8")
                .split("\n")
                if item != ""
            ]
            for _ in git_diff_files:
                failed_reason = "Run readme generation before check in"
                return

            # Merge the pipelines from the readme file with the original list of pipelines.
            for key in pipelines_samples.keys():
                value = pipelines_samples[key]
                checks[key] = value

    # Reverse checks.
    for key in checks.keys():
        value = checks[key]
        for path in value:
            if path in reverse_checks:
                reverse_checks[path].append(key)
            else:
                reverse_checks[path] = [key]

    # Render pipelines and pipelines_count using input_paths.
    for input_path in input_paths:
        # Input pattern /**: input_path should match in the middle.
        # Input pattern /*: input_path should match last but one.
        # Other input pattern: input_path should match last.
        keys = [
            key for key in reverse_checks.keys() if fnmatch.fnmatch(input_path, key)
        ]
        # Loop through each matched path pattern.
        for key_item in keys:
            # Loop through each pipeline registered for that pattern.
            for key in reverse_checks[key_item]:
                # Use the special-care count if the pipeline has one, otherwise 1.
                if key in special_care:
                    pipelines[key] = special_care[key]
                else:
                    pipelines[key] = 1
                # Set the pipeline count to 0.
                pipelines_count[key] = 0


def run_checks():
    global github_repository
    global snippet_debug
    global merge_commit
    global loop_times
    global github_workspace
    global failed_reason

    if merge_commit == "":
        merge_commit = (
            subprocess.check_output(["git", "log", "-1"]).decode("utf-8").split("\n")
        )
        if snippet_debug != 0:
            print(merge_commit)
        for line in merge_commit:
            if "Merge" in line and "into" in line:
                merge_commit = line.split(" ")[-3]
                break
    if snippet_debug != 0:
        print("MergeCommit " + merge_commit)

    not_started_counter = 5
    os.chdir(github_workspace)

    # Get diff of current branch and main branch.
    try:
        git_merge_base = (
            subprocess.check_output(["git", "merge-base", "origin/main", "HEAD"])
            .decode("utf-8")
            .rstrip()
        )
        git_diff = (
            subprocess.check_output(
                ["git", "diff", "--name-only", "--diff-filter=d", f"{git_merge_base}"],
                stderr=subprocess.STDOUT,
            )
            .decode("utf-8")
            .rstrip()
            .split("\n")
        )
    except subprocess.CalledProcessError as e:
        print("Exception on process, rc=", e.returncode, "output=", e.output)
        raise e

    # Prepare how many pipelines should be triggered.
    trigger_prepare(git_diff)
    if failed_reason != "":
        raise Exception(failed_reason)

    # Loop for 15 minutes at most (30 rounds of 30 seconds by default).
    for i in range(loop_times):
        # Wait for 30 seconds.
        time.sleep(30)

        # Reset the failed reason.
        failed_reason = ""
        # Reset the valid status array.
        valid_status_array = []

        # Get all triggered pipelines.
        # If not all pipelines are triggered, continue.
        trigger_checks(valid_status_array)
        if failed_reason != "":
            if not_started_counter == 0:
                raise Exception(failed_reason + " for 6 times.")
            print(failed_reason)
            not_started_counter -= 1
            continue

        # Get pipeline conclusion priority:
        # 1. Not successful, Fail.
        # 2. Not finished, Continue.
        # 3. Successful, Break.
        status_checks(valid_status_array)

        # Check if the failed reason contains "not successful".
        if "not successful" in failed_reason.lower():
            raise Exception(failed_reason)
        # Check if the failed reason contains "not finished".
        elif "not finished" in failed_reason.lower():
            print(failed_reason)
            continue
        # Otherwise, all required pipelines are successful.
        else:
            print("All required pipelines are successful.")
            break

    # Check if the failed reason is not empty.
    if failed_reason != "":
        raise Exception(failed_reason)


if __name__ == "__main__":
    # Run the checks.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--merge-commit",
        help="merge commit sha",
    )
    parser.add_argument(
        "-n",
        "--loop-times",
        type=int,
        help="Loop times",
    )
    parser.add_argument(
        "-t",
        "--github-workspace",
        help="base path of github workspace",
    )
    args = parser.parse_args()
    if args.merge_commit:
        merge_commit = args.merge_commit
    if args.loop_times:
        loop_times = args.loop_times
    if args.github_workspace:
        github_workspace = args.github_workspace
    run_checks()
promptflow_repo/promptflow/scripts/runtime_mgmt/update-runtime.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import time
from pathlib import Path

import requests
from azure.ai.ml import MLClient, load_environment
from azure.identity import AzureCliCredential

ENVIRONMENT_YAML = Path(__file__).parent / "runtime-env" / "env.yaml"

EXAMPLE_RUNTIME_NAME = "example-runtime-ci"
TEST_RUNTIME_NAME = "test-runtime-ci"


class PFSRuntimeHelper:
    def __init__(self, ml_client: MLClient):
        subscription_id = ml_client._operation_scope.subscription_id
        resource_group_name = ml_client._operation_scope.resource_group_name
        workspace_name = ml_client._operation_scope.workspace_name
        location = ml_client.workspaces.get().location
        self._request_url_prefix = (
            f"https://{location}.api.azureml.ms/flow/api/subscriptions/{subscription_id}"
            f"/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices"
            f"/workspaces/{workspace_name}/FlowRuntimes"
        )
        token = ml_client._credential.get_token("https://management.azure.com/.default").token
        self._headers = {"Authorization": f"Bearer {token}"}

    def update_runtime(self, name: str, env_asset_id: str) -> None:
        body = {
            "runtimeDescription": "Runtime hosted on compute instance, serves for examples checks.",
            "environment": env_asset_id,
            "instanceCount": "",
        }
        response = requests.put(
            f"{self._request_url_prefix}/{name}",
            headers=self._headers,
            json=body,
        )
        response.raise_for_status()


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", help="Path to config.json", type=str)
    return parser.parse_args()


def init_ml_client(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
) -> MLClient:
    return MLClient(
        credential=AzureCliCredential(),
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
    )


def create_environment(ml_client: MLClient) -> str:
    environment = load_environment(source=ENVIRONMENT_YAML)
    env = ml_client.environments.create_or_update(environment)

    # have observed delay between environment creation and asset id availability
    while True:
        try:
            ml_client.environments.get(name=env.name, version=env.version)
            break
        except Exception:
            time.sleep(10)

    # get workspace id from REST workspace object
    resource_group_name = ml_client._operation_scope.resource_group_name
    workspace_name = ml_client._operation_scope.workspace_name
    location = ml_client.workspaces.get().location
    workspace_id = ml_client._workspaces._operation.get(
        resource_group_name=resource_group_name, workspace_name=workspace_name
    ).workspace_id
    # concat environment asset id
    asset_id = (
        f"azureml://locations/{location}/workspaces/{workspace_id}"
        f"/environments/{env.name}/versions/{env.version}"
    )
    return asset_id


def main(args: argparse.Namespace):
    subscription_id, resource_group_name, workspace_name = MLClient._get_workspace_info(args.path)
    ml_client = init_ml_client(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
    )
    pfs_runtime_helper = PFSRuntimeHelper(ml_client=ml_client)

    print("creating environment...")
    env_asset_id = create_environment(ml_client=ml_client)
    print("created environment, asset id:", env_asset_id)

    print("updating runtime for test...")
    pfs_runtime_helper.update_runtime(name=TEST_RUNTIME_NAME, env_asset_id=env_asset_id)
    print("updating runtime for example...")
    pfs_runtime_helper.update_runtime(name=EXAMPLE_RUNTIME_NAME, env_asset_id=env_asset_id)
    print("runtime updated!")


if __name__ == "__main__":
    main(args=parse_args())
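Per parse_args above, the script is driven by a single argument: python update-runtime.py --path <path-to-config.json>, where the file is an Azure ML workspace config.json consumed by MLClient._get_workspace_info.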
promptflow_repo/promptflow/scripts/runtime_mgmt/runtime-env/env.yaml
$schema: https://azuremlschemas.azureedge.net/latest/environment.schema.json
name: chat-with-pdf
build:
  path: context
inference_config:
  liveness_route:
    port: 8080
    path: /health
  readiness_route:
    port: 8080
    path: /health
  scoring_route:
    port: 8080
    path: /score
promptflow_repo/promptflow/scripts/runtime_mgmt/runtime-env/context/Dockerfile
FROM mcr.microsoft.com/azureml/promptflow/promptflow-runtime:latest
COPY ./requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
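For a local smoke test of this context (an assumption; in CI the image is presumably built by the Azure ML environment service from the build.path above), a standard build command would be: docker build -t promptflow-runtime-test scripts/runtime_mgmt/runtime-env/context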
promptflow_repo/promptflow/scripts/runtime_mgmt/runtime-env/context/requirements.txt
PyPDF2
faiss-cpu
openai
jinja2
python-dotenv
tiktoken
promptflow_repo/promptflow/scripts/compliance-check/user_exclusion.xml
<PoliCheckExclusions>
  <!-- All strings must be UPPER CASE -->
  <!-- index-xxx.js is an auto-generated javascript file; skipped since it's not expected to be readable -->
  <Exclusion Type="FileName">SRC\PROMPTFLOW\PROMPTFLOW\_SDK\_SERVING\STATIC\INDEX.JS</Exclusion>
</PoliCheckExclusions>
promptflow_repo/promptflow/scripts/compliance-check/Check-PolicheckScan.ps1
# Copyright (C) Microsoft Corporation. All rights reserved.

<#
.SYNOPSIS
Check Policheck Scan result.

.DESCRIPTION
Helper script to check the Policheck result.
If there is a Policheck failure, show the error and throw an exception.
#>
[CmdletBinding()]
param (
    [string]$policheckResult,
    [bool]$raiseError = $true
)

$result = Get-Content -Path $policheckResult | Measure-Object -Line;
Write-Host("Number of errors found in this scan: " + ($result.Lines - 1));
if ($raiseError -and ($result.Lines -gt 1)) {
    Get-Content -Path $policheckResult;
    throw "Policheck scan completed, but there are issues to fix.";
}
# Read-Host "Press enter to finish the process and close this window";
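A typical invocation (the result-file path is illustrative): ./Check-PolicheckScan.ps1 -policheckResult <path-to-scan-result> -raiseError $true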
promptflow_repo/promptflow/scripts/release/promptflow-release-note.md
We are pleased to announce the release of promptflow {{VERSION}}.

This release includes some new features, bug fixes, and improvements. We recommend that all users upgrade to this version. See the [CHANGELOG](https://github.com/microsoft/promptflow/blob/release/promptflow/{{VERSION}}/src/promptflow/CHANGELOG.md) for a list of all the changes.

The release will be available via PyPI:

```bash
pip install --upgrade promptflow
```

Please report any issues with the release on the [promptflow issue tracker](https://github.com/microsoft/promptflow/issues).

Thanks to all the contributors who made this release possible.
promptflow_repo/promptflow/scripts/tool/generate_tool_package_template.py
import argparse
import os
import re

from jinja2 import Environment, FileSystemLoader


def make_pythonic_variable_name(input_string):
    variable_name = input_string.strip()
    variable_name = re.sub(r'\W|^(?=\d)', '_', variable_name)
    if not variable_name[0].isalpha() and variable_name[0] != '_':
        variable_name = f'_{variable_name}'
    return variable_name


def convert_tool_name_to_class_name(tool_name):
    return ''.join(word.title() for word in tool_name.split('_'))


def create_file(path):
    with open(path, 'w'):
        pass


def create_folder(path):
    os.makedirs(path, exist_ok=True)


def create_tool_project_structure(destination: str, package_name: str, tool_name: str,
                                  function_name: str, is_class_way=False):
    if is_class_way:
        class_name = convert_tool_name_to_class_name(tool_name)

    # Load templates
    templates_abs_path = os.path.join(os.path.dirname(__file__), "templates")
    file_loader = FileSystemLoader(templates_abs_path)
    env = Environment(loader=file_loader)

    # Create new directory
    if os.path.exists(destination):
        print("Destination already exists. Please choose another one.")
        return

    os.makedirs(destination, exist_ok=True)

    # Generate setup.py
    template = env.get_template('setup.py.j2')
    output = template.render(package_name=package_name, tool_name=tool_name)
    with open(os.path.join(destination, 'setup.py'), 'w') as f:
        f.write(output)

    # Generate MANIFEST.in
    template = env.get_template('MANIFEST.in.j2')
    output = template.render(package_name=package_name)
    with open(os.path.join(destination, 'MANIFEST.in'), 'w') as f:
        f.write(output)

    # Create tools folder and __init__.py, tool.py inside it
    tools_dir = os.path.join(destination, package_name, 'tools')
    create_folder(tools_dir)
    create_file(os.path.join(tools_dir, '__init__.py'))
    with open(os.path.join(tools_dir, '__init__.py'), 'w') as f:
        f.write('__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore\n')

    # Generate tool.py
    if is_class_way:
        template = env.get_template('tool2.py.j2')
        output = template.render(class_name=class_name, function_name=function_name)
    else:
        template = env.get_template('tool.py.j2')
        output = template.render(function_name=function_name)
    with open(os.path.join(tools_dir, f'{tool_name}.py'), 'w') as f:
        f.write(output)

    # Generate utils.py
    template = env.get_template('utils.py.j2')
    output = template.render()
    with open(os.path.join(tools_dir, 'utils.py'), 'w') as f:
        f.write(output)

    create_file(os.path.join(destination, package_name, '__init__.py'))
    with open(os.path.join(destination, package_name, '__init__.py'), 'w') as f:
        f.write('__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore\n')

    # Create yamls folder and __init__.py inside it
    yamls_dir = os.path.join(destination, package_name, 'yamls')
    create_folder(yamls_dir)

    # Create tool yaml
    if is_class_way:
        template = env.get_template('tool2.yaml.j2')
        output = template.render(package_name=package_name, tool_name=tool_name,
                                 class_name=class_name, function_name=function_name)
    else:
        template = env.get_template('tool.yaml.j2')
        output = template.render(package_name=package_name, tool_name=tool_name, function_name=function_name)
    with open(os.path.join(yamls_dir, f'{tool_name}.yaml'), 'w') as f:
        f.write(output)

    # Create test folder and __init__.py inside it
    tests_dir = os.path.join(destination, 'tests')
    create_folder(tests_dir)
    create_file(os.path.join(tests_dir, '__init__.py'))

    # Create test_tool.py
    if is_class_way:
        template = env.get_template('test_tool2.py.j2')
        output = template.render(package_name=package_name, tool_name=tool_name,
                                 class_name=class_name, function_name=function_name)
    else:
        template = env.get_template('test_tool.py.j2')
        output = template.render(package_name=package_name, tool_name=tool_name, function_name=function_name)
    with open(os.path.join(tests_dir, f'test_{tool_name}.py'), 'w') as f:
        f.write(output)

    print(f'Generated tool package template for {package_name} at {destination}')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="promptflow tool template generation arguments.")
    parser.add_argument("--package-name", "-p", type=str, help="your tool package's name", required=True)
    parser.add_argument("--destination", "-d", type=str,
                        help="target folder you want to place the generated template", required=True)
    parser.add_argument("--tool-name", "-t", type=str,
                        help="your tool's name, by default is hello_world_tool", required=False)
    parser.add_argument("--function-name", "-f", type=str,
                        help="your tool's function name, by default is your tool's name", required=False)
    parser.add_argument("--use-class", action='store_true',
                        help="whether to use a class-based implementation")
    args = parser.parse_args()

    destination = args.destination
    package_name = make_pythonic_variable_name(args.package_name)
    package_name = package_name.lower()

    if args.tool_name:
        tool_name = make_pythonic_variable_name(args.tool_name)
    else:
        tool_name = 'hello_world_tool'
    tool_name = tool_name.lower()

    if args.function_name:
        function_name = make_pythonic_variable_name(args.function_name)
    else:
        function_name = tool_name
    function_name = function_name.lower()

    create_tool_project_structure(destination, package_name, tool_name, function_name, args.use_class)
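Based on the argparse definitions above, a typical invocation (package and tool names are illustrative) is: python scripts/tool/generate_tool_package_template.py -d <destination-folder> -p my_tools -t greeting_tool --use-class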
promptflow_repo/promptflow/scripts/tool/generate_package_tool_meta.py
import argparse
import ast
import importlib
import json
import os
import sys

from ruamel.yaml import YAML

sys.path.append("src/promptflow-tools")
sys.path.append(os.getcwd())

from utils.generate_tool_meta_utils import generate_custom_llm_tools_in_module_as_dict, generate_python_tools_in_module_as_dict  # noqa: E402, E501


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate meta for a tool.")
    parser.add_argument("--module", "-m", help="Module to generate tools.", type=str, required=True)
    parser.add_argument("--output", "-o", help="Path to the output tool meta file.", required=True)
    parser.add_argument(
        "--tool-type",
        "-t",
        help="Provide tool type: 'python' or 'custom_llm'. By default, 'python' will be set as the tool type.",
        type=str,
        choices=["python", "custom_llm"],
        default="python",
    )
    parser.add_argument(
        "--name",
        "-n",
        help="Provide a custom name for the tool. By default, the function name will be used as the tool name.",
        type=str,
    )
    parser.add_argument("--description", "-d", help="Provide a brief description of the tool.", type=str)
    parser.add_argument(
        "--icon",
        "-i",
        type=str,
        help="your tool's icon image path; if you need to show different icons in dark and light mode, \n"
        "please use the `icon-light` and `icon-dark` parameters. \n"
        "If these icon parameters are not provided, the system will use the default icon.",
        required=False)
    parser.add_argument(
        "--icon-light",
        type=str,
        help="your tool's icon image path for light mode; \n"
        "if you need to show the same icon in dark and light mode, please use the `icon` parameter. \n"
        "If these icon parameters are not provided, the system will use the default icon.",
        required=False)
    parser.add_argument(
        "--icon-dark",
        type=str,
        help="your tool's icon image path for dark mode; \n"
        "if you need to show the same icon in dark and light mode, please use the `icon` parameter. \n"
        "If these icon parameters are not provided, the system will use the default icon.",
        required=False)
    parser.add_argument(
        "--category",
        "-c",
        type=str,
        help="your tool's category; if not provided, the tool will be displayed under the root folder.",
        required=False)
    parser.add_argument(
        "--tags",
        type=ast.literal_eval,
        help="your tool's tags. It should be a dictionary-like string, e.g.: --tags \"{'tag1':'v1','tag2':'v2'}\".",
        required=False)
    args = parser.parse_args()

    m = importlib.import_module(args.module)

    icon = ""
    if args.icon:
        if args.icon_light or args.icon_dark:
            raise ValueError("You cannot provide both `icon` and `icon-light` or `icon-dark`.")
        from convert_image_to_data_url import check_image_type_and_generate_data_url  # noqa: E402

        icon = check_image_type_and_generate_data_url(args.icon)
    elif args.icon_light or args.icon_dark:
        if args.icon_light:
            from convert_image_to_data_url import check_image_type_and_generate_data_url  # noqa: E402

            if isinstance(icon, dict):
                icon["light"] = check_image_type_and_generate_data_url(args.icon_light)
            else:
                icon = {"light": check_image_type_and_generate_data_url(args.icon_light)}
        if args.icon_dark:
            from convert_image_to_data_url import check_image_type_and_generate_data_url  # noqa: E402

            if isinstance(icon, dict):
                icon["dark"] = check_image_type_and_generate_data_url(args.icon_dark)
            else:
                icon = {"dark": check_image_type_and_generate_data_url(args.icon_dark)}

    if args.tool_type == "custom_llm":
        tools_dict = generate_custom_llm_tools_in_module_as_dict(
            m,
            name=args.name,
            description=args.description,
            icon=icon,
            category=args.category,
            tags=args.tags)
    else:
        tools_dict = generate_python_tools_in_module_as_dict(
            m,
            name=args.name,
            description=args.description,
            icon=icon,
            category=args.category,
            tags=args.tags)
    # The generated dict cannot be dumped as yaml directly since yaml cannot handle string enum.
    tools_dict = json.loads(json.dumps(tools_dict))
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.indent(mapping=2, sequence=4, offset=2)
    with open(args.output, "w") as f:
        yaml.dump(tools_dict, f)
    print(f"Tools meta generated to '{args.output}'.")
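A typical invocation based on the options above (module and output path are illustrative): python scripts/tool/generate_package_tool_meta.py -m my_tools.echo -o my_tools/yamls/echo.yaml -t python --tags "{'tag1':'v1'}"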
promptflow_repo/promptflow/scripts/tool/convert_image_to_data_url.py
import argparse
import base64
import io
import os

from PIL import Image

SUPPORT_IMAGE_TYPES = ["png", "jpg", "jpeg", "bmp"]


def get_image_size(image_path):
    with Image.open(image_path) as img:
        width, height = img.size
    return width, height


def get_image_storage_size(image_path):
    file_size_bytes = os.path.getsize(image_path)
    file_size_mb = file_size_bytes / (1024 * 1024)
    return file_size_mb


def image_to_data_url(image_path):
    with open(image_path, "rb") as image_file:
        # Create a BytesIO object from the image file
        image_data = io.BytesIO(image_file.read())

    # Open the image and resize it
    img = Image.open(image_data)
    if img.size != (16, 16):
        img = img.resize((16, 16), Image.Resampling.LANCZOS)

    # Save the resized image to a data URL
    buffered = io.BytesIO()
    img.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue())
    data_url = 'data:image/png;base64,' + img_str.decode('utf-8')

    return data_url


def create_html_file(data_uri, output_path):
    html_content = '<html>\n<body>\n<img src="{}" alt="My Image">\n</body>\n</html>'.format(data_uri)
    with open(output_path, 'w') as file:
        file.write(html_content)


def check_image_type(image_path):
    file_extension = image_path.lower().split('.')[-1]
    if file_extension not in SUPPORT_IMAGE_TYPES:
        raise ValueError("Only png, jpg, jpeg or bmp image types are supported.")


def check_image_type_and_generate_data_url(image_path):
    check_image_type(image_path)
    return image_to_data_url(image_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--image-path",
        type=str,
        required=True,
        help="Your image input path",
    )
    parser.add_argument(
        "--output",
        "-o",
        type=str,
        required=True,
        help="Your image output path",
    )
    args = parser.parse_args()
    data_url = check_image_type_and_generate_data_url(args.image_path)
    print("Your image data uri: \n{}".format(data_url))
    create_html_file(data_url, args.output)
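Usage, per the argparse definitions above: python scripts/tool/convert_image_to_data_url.py --image-path <icon.png> -o <preview.html>; the data URL is printed and also embedded into the generated HTML file for visual inspection.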
promptflow_repo/promptflow/scripts/tool/deploy_endpoint.py
import argparse

from utils.repo_utils import create_remote_branch_in_ADO_with_new_tool_pkg_version, deploy_test_endpoint

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tool_pkg_version", type=str, required=True)
    parser.add_argument("--ado_pat", type=str, required=True)
    args = parser.parse_args()
    print(f"Package version: {args.tool_pkg_version}")
    branch_name = create_remote_branch_in_ADO_with_new_tool_pkg_version(args.ado_pat, args.tool_pkg_version)
    deploy_test_endpoint(branch_name, ado_pat=args.ado_pat)
promptflow_repo/promptflow/scripts/tool/validate_tool_secret.py
import argparse

from utils.secret_manager import (
    get_secret_client,
    init_used_secret_names,
    validate_secret_name,
)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tenant_id",
        type=str,
        required=True,
        help="The tenant id of the service principal",
    )
    parser.add_argument(
        "--client_id",
        type=str,
        required=True,
        help="The client id of the service principal",
    )
    parser.add_argument(
        "--client_secret",
        type=str,
        required=True,
        help="The client secret of the service principal",
    )
    parser.add_argument(
        "--secret_name",
        type=str,
        required=True,
    )
    args = parser.parse_args()
    secret_client = get_secret_client(
        args.tenant_id, args.client_id, args.client_secret
    )
    init_used_secret_names(secret_client)
    validate_secret_name(args.secret_name)
promptflow_repo/promptflow/scripts/tool/generate_connection_config.py
import argparse
import json
from pathlib import Path

from utils.secret_manager import get_secret, get_secret_client, list_secret_names

CONNECTION_FILE_NAME = "connections.json"
PROMPTFLOW_TOOLS_ROOT = Path(__file__) / "../../../src/promptflow-tools"
CONNECTION_TPL_FILE_PATH = PROMPTFLOW_TOOLS_ROOT / "connections.json.example"


def fill_key_to_dict(template_dict, keys_dict):
    if not isinstance(template_dict, dict):
        return
    for key, val in template_dict.items():
        if isinstance(val, str) and val in keys_dict:
            template_dict[key] = keys_dict[val]
            continue
        fill_key_to_dict(val, keys_dict)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tenant_id", type=str, help="The tenant id of the service principal")
    parser.add_argument("--client_id", type=str, help="The client id of the service principal")
    parser.add_argument("--client_secret", type=str, help="The client secret of the service principal")
    parser.add_argument("--local", action='store_true', help="local debug mode")
    args = parser.parse_args()

    template_dict = json.loads(open(CONNECTION_TPL_FILE_PATH.resolve().absolute(), "r").read())
    file_path = (PROMPTFLOW_TOOLS_ROOT / CONNECTION_FILE_NAME).resolve().absolute().as_posix()
    print(f"file_path: {file_path}")

    if not args.local:
        client = get_secret_client(tenant_id=args.tenant_id, client_id=args.client_id, client_secret=args.client_secret)
        all_secret_names = list_secret_names(client)
        data = {secret_name: get_secret(secret_name, client) for secret_name in all_secret_names}
        fill_key_to_dict(template_dict, data)

    with open(file_path, "w") as f:
        json.dump(template_dict, f)
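To illustrate fill_key_to_dict: it walks the template recursively and replaces any string value that matches a Key Vault secret name with the secret's value. A self-contained example with hypothetical names:

template = {"azure_open_ai_connection": {"api_key": "azure-openai-api-key"}}
secrets = {"azure-openai-api-key": "<real-key>"}
fill_key_to_dict(template, secrets)
# template is now {"azure_open_ai_connection": {"api_key": "<real-key>"}}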
promptflow_repo/promptflow/scripts/tool/upload_tool_secret.py
import argparse

from utils.secret_manager import get_secret_client, upload_secret

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tenant_id",
        type=str,
        required=True,
        help="The tenant id of the service principal",
    )
    parser.add_argument(
        "--client_id",
        type=str,
        required=True,
        help="The client id of the service principal",
    )
    parser.add_argument(
        "--client_secret",
        type=str,
        required=True,
        help="The client secret of the service principal",
    )
    parser.add_argument(
        "--secret_name",
        type=str,
        required=True,
    )
    parser.add_argument(
        "--secret_value",
        type=str,
        required=True,
    )
    args = parser.parse_args()
    secret_client = get_secret_client(
        args.tenant_id, args.client_id, args.client_secret
    )
    upload_secret(secret_client, args.secret_name, args.secret_value)
promptflow_repo/promptflow/scripts/tool/exceptions/__init__.py
from .secret_exceptions import SecretNameAlreadyExistsException, SecretNameInvalidException, SecretNoSetPermissionException # noqa: F401, E501
promptflow_repo/promptflow/scripts/tool/exceptions/secret_exceptions.py
class SecretNameAlreadyExistsException(Exception):
    pass


class SecretNameInvalidException(Exception):
    pass


class SecretNoSetPermissionException(Exception):
    pass
promptflow_repo/promptflow/scripts/tool/utils/generate_tool_meta_utils.py
""" This file can generate a meta file for the given prompt template or a python file. """ import inspect import types from dataclasses import asdict from utils.tool_utils import function_to_interface from promptflow.contracts.tool import Tool, ToolType # Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow' # since the code here is in promptflow namespace as well from promptflow._internal import ToolProvider from promptflow.exceptions import ErrorTarget, UserErrorException def asdict_without_none(obj): return asdict(obj, dict_factory=lambda x: {k: v for (k, v) in x if v}) def asdict_with_advanced_features_without_none(obj, **advanced_features): dict_without_none = asdict_without_none(obj) dict_without_none.update({k: v for k, v in advanced_features.items() if v}) return dict_without_none def is_tool(f): if not isinstance(f, types.FunctionType): return False if not hasattr(f, "__tool"): return False return True def collect_tool_functions_in_module(m): tools = [] for _, obj in inspect.getmembers(m): if is_tool(obj): # Note that the tool should be in defined in exec but not imported in exec, # so it should also have the same module with the current function. if getattr(obj, "__module__", "") != m.__name__: continue tools.append(obj) return tools def collect_tool_methods_in_module(m): tools = [] for _, obj in inspect.getmembers(m): if isinstance(obj, type) and issubclass(obj, ToolProvider) and obj.__module__ == m.__name__: for _, method in inspect.getmembers(obj): if is_tool(method): initialize_inputs = obj.get_initialize_inputs() tools.append((method, initialize_inputs)) return tools def _parse_tool_from_function(f, initialize_inputs=None, tool_type=ToolType.PYTHON, name=None, description=None): if hasattr(f, "__tool") and isinstance(f.__tool, Tool): return f.__tool if hasattr(f, "__original_function"): f = f.__original_function try: inputs, _, _ = function_to_interface(f, tool_type=tool_type, initialize_inputs=initialize_inputs) except Exception as e: raise BadFunctionInterface(f"Failed to parse interface for tool {f.__name__}, reason: {e}") from e class_name = None if "." 
in f.__qualname__: class_name = f.__qualname__.replace(f".{f.__name__}", "") # Construct the Tool structure return Tool( name=name or f.__qualname__, description=description or inspect.getdoc(f), inputs=inputs, type=tool_type, class_name=class_name, function=f.__name__, module=f.__module__, ) def generate_python_tools_in_module(module, name, description): tool_functions = collect_tool_functions_in_module(module) tool_methods = collect_tool_methods_in_module(module) return [_parse_tool_from_function(f, name=name, description=description) for f in tool_functions] + [ _parse_tool_from_function(f, initialize_inputs, name=name, description=description) for (f, initialize_inputs) in tool_methods ] def generate_python_tools_in_module_as_dict(module, name=None, description=None, **advanced_features): tools = generate_python_tools_in_module(module, name, description) return _construct_tool_dict(tools, **advanced_features) def generate_custom_llm_tools_in_module(module, name, description): tool_functions = collect_tool_functions_in_module(module) tool_methods = collect_tool_methods_in_module(module) return [ _parse_tool_from_function(f, tool_type=ToolType.CUSTOM_LLM, name=name, description=description) for f in tool_functions ] + [ _parse_tool_from_function( f, initialize_inputs, tool_type=ToolType.CUSTOM_LLM, name=name, description=description ) for (f, initialize_inputs) in tool_methods ] def generate_custom_llm_tools_in_module_as_dict(module, name=None, description=None, **advanced_features): tools = generate_custom_llm_tools_in_module(module, name, description) return _construct_tool_dict(tools, **advanced_features) def _construct_tool_dict(tools, **advanced_features): return { f"{t.module}.{t.class_name}.{t.function}" if t.class_name is not None else f"{t.module}.{t.function}": asdict_with_advanced_features_without_none(t, **advanced_features) for t in tools } class ToolValidationError(UserErrorException): """Base exception raised when failed to validate tool.""" def __init__(self, message): super().__init__(message, target=ErrorTarget.TOOL) class PythonParsingError(ToolValidationError): pass class BadFunctionInterface(PythonParsingError): pass
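As a rough illustration of the expected input and output shape (the module name and function here are hypothetical): a module my_tools/echo.py defining

from promptflow import tool

@tool
def echo(input_text: str) -> str:
    """Echo the input back."""
    return input_text

would yield, via generate_python_tools_in_module_as_dict, a dict keyed by "my_tools.echo.echo" whose value carries the function name, module, description, and parsed inputs.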
promptflow_repo/promptflow/scripts/tool/utils/tool_utils.py
import inspect
from enum import Enum, EnumMeta
from typing import Callable, Union, get_args, get_origin

from promptflow.contracts.tool import ConnectionType, InputDefinition, ToolType, ValueType
from promptflow.contracts.types import PromptTemplate


def value_to_str(val):
    if val is inspect.Parameter.empty:
        # For the empty case, the default field will be skipped when dumping to json
        return None
    if val is None:
        # Dump default: "" in json to avoid UI validation error
        return ""
    if isinstance(val, Enum):
        return val.value
    return str(val)


def resolve_annotation(anno) -> Union[str, list]:
    """Resolve the union annotation to a type list."""
    origin = get_origin(anno)
    if origin != Union:
        return anno
    # Optional[Type] is Union[Type, NoneType], filter NoneType out
    args = [arg for arg in get_args(anno) if arg != type(None)]  # noqa: E721
    return args[0] if len(args) == 1 else args


def param_to_definition(param, value_type) -> (InputDefinition, bool):
    default_value = param.default
    enum = None
    custom_type = None
    # Get value type and enum from default if no annotation
    if default_value is not inspect.Parameter.empty and value_type == inspect.Parameter.empty:
        value_type = default_value.__class__ if isinstance(default_value, Enum) else type(default_value)
    # Extract enum for enum class
    if isinstance(value_type, EnumMeta):
        enum = [str(option.value) for option in value_type]
        value_type = str
    is_connection = False
    if ConnectionType.is_connection_value(value_type):
        if ConnectionType.is_custom_strong_type(value_type):
            typ = ["CustomConnection"]
            custom_type = [value_type.__name__]
        else:
            typ = [value_type.__name__]
        is_connection = True
    elif isinstance(value_type, list):
        if not all(ConnectionType.is_connection_value(t) for t in value_type):
            typ = [ValueType.OBJECT]
        else:
            custom_connection_added = False
            typ = []
            custom_type = []
            for t in value_type:
                if ConnectionType.is_custom_strong_type(t):
                    if not custom_connection_added:
                        custom_connection_added = True
                        typ.append("CustomConnection")
                    custom_type.append(t.__name__)
                else:
                    typ.append(t.__name__)
            is_connection = True
    else:
        typ = [ValueType.from_type(value_type)]
    return InputDefinition(type=typ, default=value_to_str(default_value),
                           description=None, enum=enum, custom_type=custom_type), is_connection


def function_to_interface(f: Callable, tool_type, initialize_inputs=None) -> tuple:
    sign = inspect.signature(f)
    all_inputs = {}
    input_defs = {}
    connection_types = []
    # Initialize the counter for prompt template inputs
    prompt_template_count = 0
    # Collect all inputs from class and func
    if initialize_inputs:
        if any(k for k in initialize_inputs if k in sign.parameters):
            raise Exception(f'Duplicate inputs found from {f.__name__!r} and "__init__()"!')
        all_inputs = {**initialize_inputs}
    all_inputs.update(
        {
            k: v
            for k, v in sign.parameters.items()
            if k != "self" and v.kind != v.VAR_KEYWORD and v.kind != v.VAR_POSITIONAL  # TODO: Handle these cases
        }
    )
    # Resolve inputs to definitions.
    for k, v in all_inputs.items():
        # Get value type from annotation
        value_type = resolve_annotation(v.annotation)
        if value_type is PromptTemplate:
            # custom llm tool has prompt template as input, skip it
            prompt_template_count += 1
            continue
        input_def, is_connection = param_to_definition(v, value_type)
        input_defs[k] = input_def
        if is_connection:
            connection_types.append(input_def.type)

    # Check PromptTemplate input:
    # a. For custom llm tool, there should be exactly one PromptTemplate input.
    # b. For python tool, PromptTemplate input is not supported.
    if tool_type == ToolType.PYTHON and prompt_template_count > 0:
        raise Exception(f"Input of type 'PromptTemplate' not supported in python tool '{f.__name__}'.")
    if tool_type == ToolType.CUSTOM_LLM and prompt_template_count == 0:
        raise Exception(f"No input of type 'PromptTemplate' was found in custom llm tool '{f.__name__}'.")
    if tool_type == ToolType.CUSTOM_LLM and prompt_template_count > 1:
        raise Exception(f"Multiple inputs of type 'PromptTemplate' were found in '{f.__name__}'. "
                        "Only one input of this type is expected.")

    outputs = {}
    # Note: We don't have output definition now
    # outputs = {"output": OutputDefinition("output", [ValueType.from_type(type(sign.return_annotation))], "", True)}
    # if is_dataclass(sign.return_annotation):
    #     for f in fields(sign.return_annotation):
    #         outputs[f.name] = OutputDefinition(f.name, [ValueType.from_type(
    #             type(getattr(sign.return_annotation, f.name)))], "", False)
    return input_defs, outputs, connection_types
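A quick sanity check of resolve_annotation's behavior, runnable against the function above:

from typing import Optional, Union

assert resolve_annotation(int) is int                      # non-union annotations pass through unchanged
assert resolve_annotation(Optional[str]) is str            # NoneType is filtered out; a single arg is unwrapped
assert resolve_annotation(Union[int, str]) == [int, str]   # a true union resolves to the list of member types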
promptflow_repo/promptflow/scripts/tool/utils/repo_utils.py
import json
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path

import requests

scripts_dir = os.path.join(os.getcwd(), "scripts")
index_url = "https://azuremlsdktestpypi.azureedge.net/test-promptflow/promptflow-tools"
ado_promptflow_repo_url_format = "https://{0}@dev.azure.com/msdata/Vienna/_git/PromptFlow"


def replace_lines_from_file_under_hint(file_path, hint: str, lines_to_replace: list):
    lines_count = len(lines_to_replace)
    with open(file_path, "r") as f:
        lines = f.readlines()
    has_hint = False
    for i in range(len(lines)):
        if lines[i].strip() == hint:
            has_hint = True
            lines[i + 1 : i + 1 + lines_count] = lines_to_replace
    if not has_hint:
        lines.append(hint + "\n")
        lines += lines_to_replace
    with open(file_path, "w") as f:
        f.writelines(lines)


def create_remote_branch_in_ADO_with_new_tool_pkg_version(
    ado_pat: str, tool_pkg_version: str, blob_prefix="test-promptflow"
) -> str:
    # Clone the Azure DevOps repo
    parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
    tmp_dir = os.path.join(parent_dir, "temp")
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    subprocess.run(["git", "config", "--global", "user.email", "[email protected]"])
    subprocess.run(["git", "config", "--global", "user.name", "github-promptflow"])
    # Change directory to the 'tmp' directory
    os.chdir(tmp_dir)
    repo_dir = os.path.join(tmp_dir, "PromptFlow")
    repo_url = ado_promptflow_repo_url_format.format(ado_pat)
    subprocess.run(["git", "clone", repo_url, repo_dir])

    # Change directory to the repo directory
    os.chdir(repo_dir)
    # Pull the devs/test branch
    subprocess.run(["git", "reset", "."])
    subprocess.run(["git", "checkout", "."])
    subprocess.run(["git", "clean", "-f", "."])
    subprocess.run(["git", "checkout", "main"])
    subprocess.run(["git", "fetch"])
    subprocess.run(["git", "pull"])

    # Make changes
    # 1. add test endpoint 'promptflow-gallery-tool-test.yaml'
    # 2. update tool package version
    source_file = Path(scripts_dir) / "tool/utils/configs/promptflow-gallery-tool-test.yaml"
    destination_folder = "deploy/model"
    shutil.copy(source_file, destination_folder)
    new_lines = [
        f"--extra-index-url https://azuremlsdktestpypi.azureedge.net/{blob_prefix}\n",
        f"promptflow_tools=={tool_pkg_version}\n",
    ]
    replace_lines_from_file_under_hint(
        file_path="docker_build/linux/extra_requirements.txt",
        hint="# Prompt-flow tool package",
        lines_to_replace=new_lines,
    )

    # Create a new remote branch
    new_branch_name = f"devs/test_tool_pkg_{tool_pkg_version}_{datetime.now().strftime('%Y%m%d%H%M%S')}"
    subprocess.run(["git", "branch", "-D", "origin", new_branch_name])
    subprocess.run(["git", "checkout", "-b", new_branch_name])
    subprocess.run(["git", "add", "."])
    subprocess.run(["git", "commit", "-m", f"Update tool package version to {tool_pkg_version}"])
    subprocess.run(["git", "push", "-u", repo_url, new_branch_name])

    return new_branch_name


def deploy_test_endpoint(branch_name: str, ado_pat: str):
    # PromptFlow-deploy-endpoint pipeline in ADO: https://msdata.visualstudio.com/Vienna/_build?definitionId=24767&_a=summary  # noqa: E501
    url = "https://dev.azure.com/msdata/Vienna/_apis/pipelines/24767/runs?api-version=7.0-preview.1"
    request_body_file = Path(scripts_dir) / "tool/utils/configs/deploy-endpoint-request-body.json"
    with open(request_body_file, "r") as f:
        body = json.load(f)
    body["resources"]["repositories"]["self"]["refName"] = f"refs/heads/{branch_name}"
    print(f"request body: {body}")
    response = requests.post(url, json=body, auth=("dummy_user_name", ado_pat))
    print(response.status_code)
    print(response.content)
promptflow_repo/promptflow/scripts/tool/utils/secret_manager.py
import re

from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient

from exceptions import (
    SecretNameAlreadyExistsException,
    SecretNameInvalidException,
    SecretNoSetPermissionException,
)

key_vault_name = "github-promptflow"
container_name = "tools"
KVUri = f"https://{key_vault_name}.vault.azure.net"

reserved_secret_names = []


def init_used_secret_names(client: SecretClient):
    global reserved_secret_names
    reserved_secret_names = list_secret_names(client)


def get_secret_client(
    tenant_id: str, client_id: str, client_secret: str
) -> SecretClient:
    credential = ClientSecretCredential(tenant_id, client_id, client_secret)
    client = SecretClient(vault_url=KVUri, credential=credential)
    return client


def get_secret(secret_name: str, client: SecretClient):
    secret = client.get_secret(secret_name)
    return secret.value


def list_secret_names(client: SecretClient) -> list:
    secret_properties = client.list_properties_of_secrets()
    return [secret.name for secret in secret_properties]


def validate_secret_name(secret_name: str):
    # Check if the secret name is valid. Secret names can only contain alphanumeric characters and dashes.
    pattern = "^[a-zA-Z0-9-]+$"
    if not re.match(pattern, secret_name):
        raise SecretNameInvalidException(
            "Secret name can only contain alphanumeric characters and dashes"
        )
    # Check if the secret name is one of the reserved names
    if secret_name in reserved_secret_names:
        raise SecretNameAlreadyExistsException(
            f"Secret name {secret_name} already exists"
        )


def upload_secret(client: SecretClient, secret_name: str, secret_value: str):
    try:
        client.set_secret(secret_name, secret_value)
    except ResourceExistsError as ex:
        if "in a deleted but recoverable state" in str(ex):
            raise SecretNameAlreadyExistsException(
                f"Secret name {secret_name} is deleted but recoverable, and its name cannot be reused"
            )
    except HttpResponseError as ex:
        if (
            ex.status_code == 403
            and "does not have secrets set permission on key vault" in str(ex)
        ):
            raise SecretNoSetPermissionException(
                f"No set permission on key vault {key_vault_name}"
            )
    print("Done.")
promptflow_repo/promptflow/scripts/tool/utils/configs/promptflow-gallery-tool-test.yaml
storage:
  storage_account: promptflowgall5817910653
deployment:
  subscription_id: 96aede12-2f73-41cb-b983-6d11a904839b
  resource_group: promptflow
  workspace_name: promptflow-gallery
  endpoint_name: tool-test638236049123389546
  deployment_name: blue
  mt_service_endpoint: https://eastus2euap.api.azureml.ms
promptflow_repo/promptflow/scripts/tool/utils/configs/deploy-endpoint-request-body.json
{ "stagesToSkip": [], "resources": { "repositories": { "self": { "refName": "refs/heads/dev-branch" } } }, "templateParameters": { "deployEndpoint": "True" }, "variables": { "model-file": { "value": "promptflow-gallery-tool-test.yaml", "isSecret": false } } }
promptflow_repo/promptflow/scripts/tool/templates/test_tool2.py.j2
import pytest
import unittest

from {{ package_name }}.tools.{{ tool_name }} import {{ class_name }}


@pytest.fixture
def my_url() -> str:
    my_url = "https://www.bing.com"
    return my_url


@pytest.fixture
def my_tool_provider(my_url) -> {{ class_name }}:
    my_tool_provider = {{ class_name }}(my_url)
    return my_tool_provider


class TestTool:
    def test_{{ tool_name }}(self, my_tool_provider):
        result = my_tool_provider.{{ function_name }}(query="Microsoft")
        assert result == "Hello Microsoft"


# Run the unit tests
if __name__ == "__main__":
    unittest.main()
promptflow_repo/promptflow/scripts/tool/templates/utils.py.j2
from pathlib import Path

from ruamel.yaml import YAML


def collect_tools_from_directory(base_dir) -> dict:
    tools = {}
    yaml = YAML()
    for yaml_file in Path(base_dir).glob("**/*.yaml"):
        with open(yaml_file, "r") as f:
            tools_in_file = yaml.load(f)
            for identifier, tool in tools_in_file.items():
                tools[identifier] = tool
    return tools


def list_package_tools():
    """List package tools."""
    yaml_dir = Path(__file__).parents[1] / "yamls"
    return collect_tools_from_directory(yaml_dir)
promptflow_repo/promptflow/scripts/tool/templates/tool.py.j2
from promptflow import tool
from promptflow.connections import CustomConnection


@tool
def {{ function_name }}(connection: CustomConnection, input_text: str) -> str:
    # Replace with your tool code.
    # Usually the connection contains configs to connect to an API.
    # CustomConnection is a dict-like object; you can use it like: connection.api_key, connection.api_base.
    # Not all tools need a connection. You can remove it if you don't need it.
    return "Hello " + input_text
promptflow_repo/promptflow/scripts/tool/templates/tool.yaml.j2
{{ package_name }}.tools.{{ tool_name }}.{{ function_name }}:
  function: {{ function_name }}
  inputs:
    connection:
      type:
        - CustomConnection
    input_text:
      type:
        - string
  module: {{ package_name }}.tools.{{ tool_name }}
  name: Hello World Tool
  description: This is hello world tool
  type: python
promptflow_repo/promptflow/scripts/tool/templates/tool2.py.j2
from promptflow import ToolProvider, tool
import urllib.request


class {{ class_name }}(ToolProvider):
    def __init__(self, url: str):
        super().__init__()
        # Loading content from a url might be slow, so we do it in the __init__ method
        # to make sure it is loaded only once.
        self.content = urllib.request.urlopen(url).read()

    @tool
    def {{ function_name }}(self, query: str) -> str:
        # Replace with your tool code.
        return "Hello " + query
promptflow_repo/promptflow/scripts/tool/templates/MANIFEST.in.j2
include {{ package_name }}/yamls/*.yaml
promptflow_repo/promptflow/scripts/tool/templates/test_tool.py.j2
import pytest
import unittest

from promptflow.connections import CustomConnection

from {{ package_name }}.tools.{{ tool_name }} import {{ function_name }}


@pytest.fixture
def my_custom_connection() -> CustomConnection:
    my_custom_connection = CustomConnection(
        {
            "api-key": "my-api-key",
            "api-secret": "my-api-secret",
            "api-url": "my-api-url"
        }
    )
    return my_custom_connection


class TestTool:
    def test_{{ function_name }}(self, my_custom_connection):
        result = {{ function_name }}(my_custom_connection, input_text="Microsoft")
        assert result == "Hello Microsoft"


# Run the unit tests
if __name__ == "__main__":
    unittest.main()
promptflow_repo/promptflow/scripts/tool/templates/setup.py.j2
from setuptools import find_packages, setup

PACKAGE_NAME = "{{ package_name }}"

setup(
    name=PACKAGE_NAME,
    version="0.0.1",
    description="This is my tools package",
    packages=find_packages(),
    entry_points={
        "package_tools": ["{{ tool_name }} = {{ package_name }}.tools.utils:list_package_tools"],
    },
    include_package_data=True,   # This line tells setuptools to include files from MANIFEST.in
)
promptflow_repo/promptflow/scripts/tool/templates/tool2.yaml.j2
{{ package_name }}.tools.{{ tool_name }}.{{ class_name }}.{{ function_name }}:
  class_name: {{ class_name }}
  function: {{ function_name }}
  inputs:
    url:
      type:
        - string
    query:
      type:
        - string
  module: {{ package_name }}.tools.{{ tool_name }}
  name: Hello World Tool
  description: This is hello world tool
  type: python
promptflow_repo/promptflow/scripts/docs/doc_generation.ps1
<#
.DESCRIPTION
Script to build doc site

.EXAMPLE
PS> ./doc_generation.ps1 -SkipInstall # skip pip install
PS> ./doc_generation.ps1 -BuildLinkCheck -WarningAsError:$true -SkipInstall
#>
[CmdletBinding()]
param(
    [switch]$SkipInstall,
    [switch]$WarningAsError = $false,
    [switch]$BuildLinkCheck = $false,
    [switch]$WithReferenceDoc = $false
)

[string] $ScriptPath = $PSCommandPath | Split-Path -Parent
[string] $RepoRootPath = $ScriptPath | Split-Path -Parent | Split-Path -Parent
[string] $DocPath = [System.IO.Path]::Combine($RepoRootPath, "docs")
[string] $TempDocPath = New-TemporaryFile | % { Remove-Item $_; New-Item -ItemType Directory -Path $_ }
[string] $PkgSrcPath = [System.IO.Path]::Combine($RepoRootPath, "src\promptflow\promptflow")
[string] $OutPath = [System.IO.Path]::Combine($ScriptPath, "_build")
[string] $SphinxApiDoc = [System.IO.Path]::Combine($DocPath, "sphinx_apidoc.log")
[string] $SphinxBuildDoc = [System.IO.Path]::Combine($DocPath, "sphinx_build.log")
[string] $WarningErrorPattern = "WARNING:|ERROR:|CRITICAL:"
$apidocWarningsAndErrors = $null
$buildWarningsAndErrors = $null

if (-not $SkipInstall) {
    # Prepare doc generation packages
    pip install pydata-sphinx-theme==0.11.0
    pip install sphinx==5.1
    pip install sphinx-copybutton==0.5.0
    pip install sphinx_design==0.3.0
    pip install sphinx-sitemap==2.2.0
    pip install sphinx-togglebutton==0.3.2
    pip install sphinxext-rediraffe==0.2.7
    pip install sphinxcontrib-mermaid==0.8.1
    pip install ipython-genutils==0.2.0
    pip install myst-nb==0.17.1
    pip install numpydoc==1.5.0
    pip install myst-parser==0.18.1
    pip install matplotlib==3.4.3
    pip install jinja2==3.0.1
    Write-Host "===============Finished install requirements==============="
}

function ProcessFiles {
    # Exclude files not meant to be in the doc site
    $exclude_files = "README.md", "dev"
    foreach ($f in $exclude_files) {
        $full_path = [System.IO.Path]::Combine($TempDocPath, $f)
        Remove-Item -Path $full_path -Recurse
    }
}

Write-Host "===============PreProcess Files==============="
Write-Host "Copy doc to: $TempDocPath"
ROBOCOPY $DocPath $TempDocPath /S /NFL /NDL /XD "*.git" [System.IO.Path]::Combine($DocPath, "_scripts\_build")
ProcessFiles

if ($WithReferenceDoc) {
    $RefDocRelativePath = "reference\python-library-reference"
    $RefDocPath = [System.IO.Path]::Combine($TempDocPath, $RefDocRelativePath)
    if (!(Test-Path $RefDocPath)) {
        throw "Reference doc path not found. Please make sure '$RefDocRelativePath' is under '$DocPath'"
    }
    Remove-Item $RefDocPath -Recurse -Force
    Write-Host "===============Build Promptflow Reference Doc==============="
    sphinx-apidoc --module-first --no-headings --no-toc --implicit-namespaces "$PkgSrcPath" -o "$RefDocPath" | Tee-Object -FilePath $SphinxApiDoc
    $apidocWarningsAndErrors = Select-String -Path $SphinxApiDoc -Pattern $WarningErrorPattern

    Write-Host "=============== Overwrite promptflow.connections.rst ==============="
    # We are doing this overwrite because the connection entities are also defined in the
    # promptflow.entities module, which would raise a duplicate object description error
    # when we run sphinx-build later.
    $ConnectionRst = [System.IO.Path]::Combine($RepoRootPath, "scripts\docs\promptflow.connections.rst")
    $AutoGenConnectionRst = [System.IO.Path]::Combine($RefDocPath, "promptflow.connections.rst")
    Copy-Item -Path $ConnectionRst -Destination $AutoGenConnectionRst -Force
}

Write-Host "===============Build Documentation==============="
$BuildParams = [System.Collections.ArrayList]::new()
if ($WarningAsError) {
    $BuildParams.Add("-W")
    $BuildParams.Add("--keep-going")
}
if ($BuildLinkCheck) {
    $BuildParams.Add("-blinkcheck")
}
sphinx-build $TempDocPath $OutPath -c $ScriptPath $BuildParams | Tee-Object -FilePath $SphinxBuildDoc
$buildWarningsAndErrors = Select-String -Path $SphinxBuildDoc -Pattern $WarningErrorPattern

Write-Host "Clean path: $TempDocPath"
Remove-Item $TempDocPath -Recurse -Confirm:$False -Force

if ($apidocWarningsAndErrors) {
    Write-Host "=============== API doc warnings and errors ==============="
    foreach ($line in $apidocWarningsAndErrors) {
        Write-Host $line -ForegroundColor Red
    }
}
if ($buildWarningsAndErrors) {
    Write-Host "=============== Build warnings and errors ==============="
    foreach ($line in $buildWarningsAndErrors) {
        Write-Host $line -ForegroundColor Red
    }
}
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/docs/promptflow.connections.rst
promptflow.connections package
==============================

.. autoclass:: promptflow.connections.AzureContentSafetyConnection
    :members:
    :undoc-members:
    :show-inheritance:
    :noindex:

.. autoclass:: promptflow.connections.AzureOpenAIConnection
    :members:
    :undoc-members:
    :show-inheritance:
    :noindex:

.. autoclass:: promptflow.connections.CognitiveSearchConnection
    :members:
    :undoc-members:
    :show-inheritance:
    :noindex:

.. autoclass:: promptflow.connections.CustomConnection
    :members:
    :undoc-members:
    :show-inheritance:
    :noindex:

.. autoclass:: promptflow.connections.FormRecognizerConnection
    :members:
    :undoc-members:
    :show-inheritance:
    :noindex:

.. autoclass:: promptflow.connections.OpenAIConnection
    :members:
    :undoc-members:
    :show-inheritance:
    :noindex:

.. autoclass:: promptflow.connections.SerpConnection
    :members:
    :undoc-members:
    :show-inheritance:
    :noindex:
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/docs/conf.py
# -- Path setup -------------------------------------------------------------- import sys # -- Project information ----------------------------------------------------- project = 'Prompt flow' copyright = '2023, Microsoft' author = 'Microsoft' sys.path.append(".") from gallery_directive import GalleryDirective # noqa: E402 # -- General configuration --------------------------------------------------- extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.todo", "sphinxext.rediraffe", "sphinx_design", "sphinx_copybutton", "matplotlib.sphinxext.plot_directive", "sphinx_togglebutton", 'myst_parser', "sphinx.builders.linkcheck", ] # -- Internationalization ------------------------------------------------ # specifying the natural language populates some key tags language = "en" # specify charset as utf-8 to accept chinese punctuation charset_type = "utf-8" autosummary_generate = True # Add any paths that contain templates here, relative to this directory. # templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [ "_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints", "**.py", "**.yml", "**.ipynb", "**.sh", "**.zip", "**.skip" ] # Options for the linkcheck builder linkcheck_ignore = [ r"https://platform\.openai\.com/", r"https://help\.openai\.com/", # These are used in card links, for example 'xx.html', .md can't be resolved. r"^(?!https?)", "deploy-using-docker.html", "deploy-using-kubernetes.html", ] linkcheck_exclude_documents = ["contributing"] # -- Extension options ------------------------------------------------------- # This allows us to use ::: to denote directives, useful for admonitions myst_enable_extensions = ["colon_fence", "substitution"] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "pydata_sphinx_theme" html_logo = "_static/logo.svg" html_favicon = "_static/logo32.ico" html_sourcelink_suffix = "" html_show_sourcelink = False # Define the json_url for our version switcher. html_theme_options = { "github_url": "https://github.com/microsoft/promptflow", "header_links_before_dropdown": 6, "icon_links": [ { "name": "PyPI", "url": "https://pypi.org/project/promptflow/", "icon": "fa-solid fa-box", }, ], "logo": { "text": "Prompt flow", "alt_text": "Prompt flow", }, "use_edit_page_button": True, "show_toc_level": 1, "navbar_align": "left", # [left, content, right] For testing that the navbar items align properly "navbar_center": ["navbar-nav"], "announcement": "Prompt flow supports OpenAI 1.x since v1.1.0. This may introduce breaking change. 
Reach " "<a href='https://microsoft.github.io/promptflow/how-to-guides/faq.html#openai-1-x-support'>here</a> " "for guide to upgrade.", "show_nav_level": 1, } html_sidebars = { # "quick_start/README.md": ['localtoc.html', 'relations.html', 'searchbox.html'], # "examples/persistent-search-field": ["search-field"], # Blog sidebars # ref: https://ablog.readthedocs.io/manual/ablog-configuration-options/#blog-sidebars "features": ['localtoc.html', 'relations.html', 'searchbox.html'], # "tutorials": ['localtoc.html', 'relations.html', 'searchbox.html'], } html_context = { "default_mode": "light", "github_user": "", "github_repo": "microsoft/promptflow", "github_version": "main", "doc_path": "docs", } rediraffe_redirects = { } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] html_css_files = ["custom.css"] html_js_files = ['custom.js'] todo_include_todos = True # myst reference config myst_heading_anchors = 5 def setup(app): # Add the gallery directive app.add_directive("gallery-grid", GalleryDirective)
0
promptflow_repo/promptflow/scripts/docs
promptflow_repo/promptflow/scripts/docs/gallery_directive/__init__.py
"""A directive to generate a gallery of images from structured data. Generating a gallery of images that are all the same size is a common pattern in documentation, and this can be cumbersome if the gallery is generated programmatically. This directive wraps this particular use-case in a helper-directive to generate it with a single YAML configuration file. It currently exists for maintainers of the pydata-sphinx-theme, but might be abstracted into a standalone package if it proves useful. """ from yaml import safe_load from typing import List from pathlib import Path from docutils import nodes from docutils.parsers.rst import directives from sphinx.util.docutils import SphinxDirective from sphinx.util import logging logger = logging.getLogger(__name__) TEMPLATE_GRID = """ `````{{grid}} {grid_columns} {container_options} {content} ````` """ GRID_CARD = """ ````{{grid-item-card}} {title} {card_options} {content} ```` """ class GalleryDirective(SphinxDirective): """A directive to show a gallery of images and links in a grid.""" name = "gallery-grid" has_content = True required_arguments = 0 optional_arguments = 1 final_argument_whitespace = True option_spec = { # A class to be added to the resulting container "grid-columns": directives.unchanged, "class-container": directives.unchanged, "class-card": directives.unchanged, } def run(self) -> List[nodes.Node]: # noqa: C901 if self.arguments: # If an argument is given, assume it's a path to a YAML file # Parse it and load it into the directive content path_data_rel = Path(self.arguments[0]) path_doc, _ = self.get_source_info() path_doc = Path(path_doc).parent path_data = (path_doc / path_data_rel).resolve() if not path_data.exists(): logger.warn(f"Could not find grid data at {path_data}.") nodes.text("No grid data found at {path_data}.") return yaml_string = path_data.read_text() else: yaml_string = "\n".join(self.content) # Read in YAML so we can generate the gallery grid_data = safe_load(yaml_string) grid_items = [] for item in grid_data: # Grid card parameters options = {} if "website" in item: options["link"] = item["website"] if "class-card" in self.options: options["class-card"] = self.options["class-card"] if "img-background" in item: options["img-background"] = item["img-background"] if "img-top" in item: options["img-top"] = item["img-top"] if "img-bottom" in item: options["img-bottom"] = item["img-bottom"] options_str = "\n".join(f":{k}: {v}" for k, v in options.items()) + "\n\n" # Grid card content content_str = "" if "header" in item: content_str += f"{item['header']}\n\n^^^\n\n" if "image" in item: content_str += f"![Gallery image]({item['image']})\n\n" if "content" in item: content_str += f"{item['content']}\n\n" if "footer" in item: content_str += f"+++\n\n{item['footer']}\n\n" title = item.get("title", "") content_str += "\n" grid_items.append( GRID_CARD.format( card_options=options_str, content=content_str, title=title ) ) # Parse the template with Sphinx Design to create an output container = nodes.container() # Prep the options for the template grid container_options = {"gutter": 2, "class-container": "gallery-directive"} if "class-container" in self.options: container_options[ "class-container" ] += f' {self.options["class-container"]}' container_options_str = "\n".join( f":{k}: {v}" for k, v in container_options.items() ) # Create the directive string for the grid grid_directive = TEMPLATE_GRID.format( grid_columns=self.options.get("grid-columns", "1 2 3 4"), container_options=container_options_str, 
content="\n".join(grid_items), ) # Parse content as a directive so Sphinx Design processes it self.state.nested_parse([grid_directive], 0, container) # Sphinx Design outputs a container too, so just use that container = container.children[0] # Add extra classes if self.options.get("container-class", []): container.attributes["classes"] += self.options.get("class", []) return [container]
0
promptflow_repo/promptflow/scripts/docs
promptflow_repo/promptflow/scripts/docs/_static/custom.css
.title { font-weight:700; } .sd-card-header { font-weight:700; font-size: 16px; } .bd-page-width { max-width: 100rem; } .bd-sidebar-primary { flex: 0 0 20%; } .bd-main .bd-content .bd-article-container { max-width: 70em; } html[data-theme="light"] { --header-announcement-color: #fff070; } html[data-theme="dark"] { --header-announcement-color: #4d4d00; } .bd-header-announcement { background: var(--header-announcement-color); } /* (A) LIGHTBOX BACKGROUND */ #lightbox { /* (A1) COVERS FULLSCREEN */ position: fixed; z-index: 1060; top: 0; left: 0; width: 100%; height: 100%; /* (A2) BACKGROUND */ background: rgba(0, 0, 0, 0.5); /* (A3) CENTER IMAGE ON SCREEN */ display: flex; align-items: center; align-items: center; /* (A4) HIDDEN BY DEFAULT */ visibility: hidden; opacity: 0; /* (A5) SHOW/HIDE ANIMATION */ transition: opacity ease 0.4s; } /* (A6) TOGGLE VISIBILITY */ #lightbox.show { visibility: visible; opacity: 1; } /* (B) LIGHTBOX IMAGE */ #lightbox img { /* (B1) DIMENSIONS */ width: 100%; height: 100%; /* (B2) IMAGE FIT */ /* contain | cover | fill | scale-down */ object-fit: contain; }
0
promptflow_repo/promptflow/scripts/docs
promptflow_repo/promptflow/scripts/docs/_static/custom.js
// Get the head element let head = document.getElementsByTagName("head")[0]; // Create the script element let script = document.createElement("script"); script.async = true; script.src = "https://www.googletagmanager.com/gtag/js?id=G-KZXK5PFBZY"; // Create another script element for the gtag code let script2 = document.createElement("script"); script2.innerHTML = ` window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date());gtag('config', 'G-KZXK5PFBZY'); `; // Insert the script elements after the head element head.insertAdjacentElement("afterbegin", script2); head.insertAdjacentElement("afterbegin", script); // This is used to zoom in images when clicked on window.onload = () => { if (document.getElementById("lightbox") === null){ // Append lightbox div to each page let div = document.createElement('div'); div.innerHTML = '<div id="lightbox"></div>'; document.body.appendChild(div); } // (A) GET LIGHTBOX & ALL .ZOOMD IMAGES let all = document.getElementsByClassName("bd-article")[0].getElementsByTagName("img"), lightbox = document.getElementById("lightbox"); // (B) CLICK TO SHOW IMAGE IN LIGHTBOX // * SIMPLY CLONE INTO LIGHTBOX & SHOW if (all.length>0) { for (let i of all) { i.onclick = () => { let clone = i.cloneNode(); clone.className = ""; lightbox.innerHTML = ""; lightbox.appendChild(clone); lightbox.className = "show"; }; }} // (C) CLICK TO CLOSE LIGHTBOX lightbox.onclick = () => { lightbox.className = ""; }; }; if (window.location.pathname === "/promptflow/" || window.location.pathname === "/promptflow/index.html") { // This is used to control homepage background let observer = new MutationObserver(function(mutations) { const dark = document.documentElement.dataset.theme == 'dark'; document.body.style.backgroundSize = "100%"; document.body.style.backgroundPositionY = "bottom"; document.body.style.backgroundRepeat = "no-repeat" }) observer.observe(document.documentElement, {attributes: true, attributeFilter: ['data-theme']}); }
0
promptflow_repo/promptflow/scripts/docs
promptflow_repo/promptflow/scripts/docs/_static/logo.svg
<svg width="512" height="512" viewBox="0 0 512 512" fill="none" xmlns="http://www.w3.org/2000/svg"> <g clip-path="url(#clip0_699_15212)"> <path fill-rule="evenodd" clip-rule="evenodd" d="M237 39.0408V461.693C237 469.397 228.655 474.208 221.988 470.346L151.918 429.764C130.306 417.247 117 394.164 117 369.19V148.892C117 123.917 130.306 100.834 151.918 88.3177L237 39.0408Z" fill="url(#paint0_linear_699_15212)"/> <path d="M395.075 127.51L237 39V167.541L283.451 192.041L395.075 127.51Z" fill="url(#paint1_linear_699_15212)"/> <path d="M395.075 127.51L237 39V167.541L283.451 192.041L395.075 127.51Z" fill="url(#paint2_linear_699_15212)"/> <path fill-rule="evenodd" clip-rule="evenodd" d="M255.5 231.426C255.5 217.184 263.073 204.017 275.382 196.854L395 127.248V216.101C395 241.03 381.742 264.078 360.193 276.611L270.528 328.76C263.861 332.637 255.5 327.828 255.5 320.116L255.5 231.426Z" fill="url(#paint3_linear_699_15212)"/> </g> <defs> <linearGradient id="paint0_linear_699_15212" x1="196.286" y1="183.041" x2="270.786" y2="92.5087" gradientUnits="userSpaceOnUse"> <stop stop-color="#3272ED"/> <stop offset="1" stop-color="#AF7BD6"/> </linearGradient> <linearGradient id="paint1_linear_699_15212" x1="457.98" y1="131.313" x2="260.351" y2="133.014" gradientUnits="userSpaceOnUse"> <stop stop-color="#DA7ED0"/> <stop offset="0.05" stop-color="#B77BD4"/> <stop offset="0.11" stop-color="#9079DA"/> <stop offset="0.18" stop-color="#6E77DF"/> <stop offset="0.25" stop-color="#5175E3"/> <stop offset="0.33" stop-color="#3973E7"/> <stop offset="0.42" stop-color="#2772E9"/> <stop offset="0.54" stop-color="#1A71EB"/> <stop offset="0.813361" stop-color="#1371EC"/> <stop offset="1" stop-color="#064495"/> </linearGradient> <linearGradient id="paint2_linear_699_15212" x1="210.18" y1="4.19164" x2="307.181" y2="175.949" gradientUnits="userSpaceOnUse"> <stop stop-color="#712575"/> <stop offset="0.09" stop-color="#9A2884"/> <stop offset="0.18" stop-color="#BF2C92"/> <stop offset="0.27" stop-color="#DA2E9C"/> <stop offset="0.34" stop-color="#EB30A2"/> <stop offset="0.4" stop-color="#F131A5"/> <stop offset="0.5" stop-color="#EC30A3"/> <stop offset="0.61" stop-color="#DF2F9E"/> <stop offset="0.72" stop-color="#C92D96"/> <stop offset="0.83" stop-color="#AA2A8A"/> <stop offset="0.95" stop-color="#83267C"/> <stop offset="1" stop-color="#712575"/> </linearGradient> <linearGradient id="paint3_linear_699_15212" x1="308" y1="260.041" x2="307.043" y2="133.204" gradientUnits="userSpaceOnUse"> <stop stop-color="#1D5CD6"/> <stop offset="1" stop-color="#787BE5"/> </linearGradient> <clipPath id="clip0_699_15212"> <rect width="512" height="512" fill="white"/> </clipPath> </defs> </svg>
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/readme_generator.py
import argparse
from functools import reduce
from pathlib import Path
from typing import List

from ghactions_driver.readme_workflow_generate import write_readme_workflow
from ghactions_driver.readme_step import ReadmeStepsManage, ReadmeSteps
from ghactions_driver.readme_parse import readme_parser
from ghactions_driver.telemetry_obj import Telemetry


def local_filter(callback, array: List[Path]):
    results = []
    for index, item in enumerate(array):
        result = callback(item, index, array)
        # if returned true, append item to results
        if result:
            results.append(item)
    return results


def no_readme_generation_filter(item: Path, index, array) -> bool:
    """
    If there are no steps in the readme, skip workflow generation.
    """
    try:
        if "build" in str(item):  # skip build folder
            return False
        full_text = readme_parser(item.relative_to(ReadmeStepsManage.git_base_dir()))
        return full_text != ""
    except Exception as error:
        print(error)
        return False


# generate readme
def main(input_glob, exclude_glob=[], output_files=[]):
    def set_add(p, q):
        return p | q

    def set_difference(p, q):
        return p - q

    globs = reduce(
        set_add,
        [set(Path(ReadmeStepsManage.git_base_dir()).glob(p)) for p in input_glob],
        set(),
    )
    globs_exclude = reduce(
        set_difference,
        [set(Path(ReadmeStepsManage.git_base_dir()).glob(p)) for p in exclude_glob],
        globs,
    )
    readme_items = sorted([i for i in globs_exclude])
    readme_items = local_filter(no_readme_generation_filter, readme_items)
    for readme in readme_items:
        readme_telemetry = Telemetry()
        workflow_name = readme.relative_to(ReadmeStepsManage.git_base_dir())
        # Deal with readme
        write_readme_workflow(workflow_name.resolve(), readme_telemetry)
        ReadmeSteps.cleanup()
        output_files.append(readme_telemetry)


if __name__ == "__main__":
    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-g",
        "--input-glob",
        nargs="+",
        help="Input README.md glob, e.g. 'examples/flows/**/README.md'",
    )
    args = parser.parse_args()
    # call main
    main(args.input_glob)
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/extract_steps_from_readme.py
import argparse
from pathlib import Path

from jinja2 import Environment, FileSystemLoader

from ghactions_driver.readme_parse import readme_parser
from ghactions_driver.readme_step import ReadmeStepsManage


def write_readme_shell(readme_path: str, output_folder: str):
    full_text = readme_parser(readme_path)
    # warm the cached git base dir before composing paths
    Path(ReadmeStepsManage.git_base_dir())
    bash_script_path = (
        Path(ReadmeStepsManage.git_base_dir()) / output_folder / "bash_script.sh"
    )
    template_env = Environment(
        loader=FileSystemLoader(
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts/readme/ghactions_driver/bash_script"
        )
    )
    bash_script_template = template_env.get_template("bash_script.sh.jinja2")
    with open(bash_script_path, "w") as f:
        f.write(bash_script_template.render({"command": full_text}))


if __name__ == "__main__":
    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-f",
        "--readme-file",
        help="Input README.md, e.g. 'examples/flows/standard/basic/README.md'",
    )
    parser.add_argument(
        "-o",
        "--output-folder",
        help="Output folder for bash_script.sh, e.g. 'examples/flows/standard/basic/'",
    )
    args = parser.parse_args()

    write_readme_shell(args.readme_file, args.output_folder)
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/schema_checker.py
import os
import subprocess
import sys
from pathlib import Path

from promptflow._sdk._load_functions import load_yaml
from promptflow._sdk._pf_client import PFClient

from ghactions_driver.readme_step import ReadmeStepsManage


def install(filename):
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", filename])


def main(input_glob_flow_dag):
    # check if flow.dag.yaml contains schema field.
    error = False
    globs = set()
    pf_client = PFClient()

    for p in input_glob_flow_dag:
        globs = globs | set(Path(ReadmeStepsManage.git_base_dir()).glob(p))
    flow_dag_items = sorted([i for i in globs])

    for file in flow_dag_items:
        data = load_yaml(file)
        if "$schema" not in data.keys():
            print(f"{file} does not contain $schema field.")
            error = True
        if error is False:
            new_links = []
            if (Path(file).parent / "requirements.txt").exists():
                install(Path(file).parent / "requirements.txt")
            if "flow-with-symlinks" in str(file):
                saved_path = os.getcwd()
                os.chdir(str(file.parent))
                source_folder = Path("../web-classification")
                for file_name in os.listdir(source_folder):
                    if not Path(file_name).exists():
                        os.symlink(source_folder / file_name, file_name)
                        new_links.append(file_name)
            validation_result = pf_client.flows.validate(flow=file)
            if "flow-with-symlinks" in str(file):
                for link in new_links:
                    os.remove(link)
                os.chdir(saved_path)
            print(f"VALIDATE {file}: \n" + repr(validation_result))
            if not validation_result.passed:
                print(f"{file} is not valid.")
                error = True
            if len(validation_result._warnings) > 0:
                print(f"{file} has warnings.")
                error = True

    if error:
        raise Exception("Some flow.dag.yaml validation failed.")
    else:
        print("All flow.dag.yaml validation completed.")


if __name__ == "__main__":
    input_glob_flow_dag = [
        "examples/**/flow.dag.yaml",
    ]
    main(input_glob_flow_dag)
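A minimal sketch of the `$schema` check above against an in-memory DAG. `yaml.safe_load` stands in for promptflow's `load_yaml`, and the schema URL is the one used across the examples folder, shown here for illustration:

```python
import yaml  # stand-in for promptflow's load_yaml helper

flow_dag = yaml.safe_load(
    """
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  question:
    type: string
nodes: []
"""
)
# The checker flags any flow.dag.yaml missing this top-level key.
assert "$schema" in flow_dag, "flow.dag.yaml does not contain $schema field."
```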
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/README.md
# Readme Workflow Generator

These tools are used to generate workflows from README.md and Python notebook files in the [examples](../../examples/) folder.

* Generated workflows will be placed in the [.github/workflows/samples_*](../../.github/workflows/) folder.
* The script will also generate a new overview [README.md](../../examples/README.md) covering all the examples.

## 1. Install dependencies

```bash
pip install -r ../../examples/requirements.txt
pip install -r ../../examples/dev_requirements.txt
```

## 2. Generate workflows

### (Option 1) One Step Generation

At the **root** of the repository, run the following command:

```bash
python scripts/readme/readme.py
```

### (Option 2) Step by Step Generation

At the **root** of the repository, run the following commands:

```bash
# Generate workflows from README.md files inside the examples folder
python scripts/readme/readme_generator.py -g "examples/flows/**/README.md"
# Generate workflows from Python notebooks inside the examples folder
python scripts/readme/workflow_generator.py -g "examples/**/*.ipynb"
```

Multiple glob inputs are supported.

## 3. Options to control generation of the examples [README.md](../../examples/README.md)

### 3.1 Notebook Workflow Generation

* Each workflow contains a metadata area; setting the `.metadata.description` field will display that message in the corresponding cell of the [README.md](../../examples/README.md) file.
* When `.metadata.no_readme_generation` is set to the value `"true"`, the script will skip generation for that notebook.

### 3.2 README.md Workflow Generation

* For README.md files, only `bash` cells are collected and converted into a workflow. A README.md with no `bash` cells produces no workflow.
* Readme descriptions are collected from the first sentence just below the title: the script collects the words before the first **.** of the first paragraph. Multi-line sentences are also supported (see the sketch after this list).
  * A supported description sentence: `This is a sample workflow for testing.`
  * An unsupported description sentence: `Please check www.microsoft.com for more details.`
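For reference, a minimal sketch of the description rule above. This is not the exact implementation in `readme.py` (which also skips headings and metadata sections), just the core idea:

```python
def first_sentence(paragraph: str) -> str:
    """Collect the words before the first '.' of a (possibly multi-line) paragraph."""
    joined = " ".join(line.strip() for line in paragraph.splitlines() if line.strip())
    return joined.split(".")[0].strip()

print(first_sentence("This is a sample workflow\nfor testing."))
# -> "This is a sample workflow for testing"
print(first_sentence("Please check www.microsoft.com for more details."))
# -> "Please check www"  (why URLs in the first sentence are unsupported)
```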
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/workflow_generator.py
import os import glob import argparse from pathlib import Path import ntpath import re import hashlib import json from jinja2 import Environment, FileSystemLoader from ghactions_driver.readme_step import ReadmeStepsManage from ghactions_driver.resource_resolver import resolve_tutorial_resource from ghactions_driver.telemetry_obj import Telemetry def format_ipynb(notebooks): # run code formatter on .ipynb files for notebook in notebooks: os.system(f"black-nb --clear-output {notebook}") def _get_paths(paths_list): """ Convert the path list to unix format. :param paths_list: The input path list. :returns: The same list with unix-like paths. """ paths_list.sort() if ntpath.sep == os.path.sep: return [pth.replace(ntpath.sep, "/") for pth in paths_list] return paths_list def write_notebook_workflow(notebook, name, output_telemetry=Telemetry()): temp_name_list = re.split(r"/|\.", notebook) temp_name_list = [ x for x in temp_name_list if x != "tutorials" and x != "examples" and x != "ipynb" ] temp_name_list = [x.replace("-", "") for x in temp_name_list] workflow_name = "_".join(["samples"] + temp_name_list) place_to_write = ( Path(ReadmeStepsManage.git_base_dir()) / ".github" / "workflows" / f"{workflow_name}.yml" ) gh_working_dir = "/".join(notebook.split("/")[:-1]) env = Environment( loader=FileSystemLoader("./scripts/readme/ghactions_driver/workflow_templates") ) template = env.get_template("basic_workflow.yml.jinja2") # Schedule notebooks at different times to reduce maximum quota usage. name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16) schedule_minute = name_hash % 60 schedule_hour = (name_hash // 60) % 4 + 19 # 19-22 UTC if "tutorials" in gh_working_dir: notebook_path = Path(ReadmeStepsManage.git_base_dir()) / str(notebook) path_filter = resolve_tutorial_resource(workflow_name, notebook_path.resolve()) elif "samples_configuration" in workflow_name: # exception, samples configuration is very simple and not related to other prompt flow examples path_filter = ( "[ examples/configuration.ipynb, .github/workflows/samples_configuration.yml ]" ) else: path_filter = f"[ {gh_working_dir}/**, examples/*requirements.txt, .github/workflows/{workflow_name}.yml ]" # these workflows require config.json to init PF/ML client workflows_require_config_json = [ "configuration", "flowinpipeline", "quickstartazure", "cloudrunmanagement", ] if any(keyword in workflow_name for keyword in workflows_require_config_json): template = env.get_template("workflow_config_json.yml.jinja2") elif "chatwithpdf" in workflow_name: template = env.get_template("pdf_workflow.yml.jinja2") elif "flowasfunction" in workflow_name: template = env.get_template("flow_as_function.yml.jinja2") content = template.render( { "workflow_name": workflow_name, "ci_name": "samples_notebook_ci", "name": name, "gh_working_dir": gh_working_dir, "path_filter": path_filter, "crontab": f"{schedule_minute} {schedule_hour} * * *", "crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT", } ) # To customize workflow, add new steps in steps.py # make another function for special cases. 
with open(place_to_write.resolve(), "w") as f: f.write(content) print(f"Write workflow: {place_to_write.resolve()}") output_telemetry.workflow_name = workflow_name output_telemetry.name = name output_telemetry.gh_working_dir = gh_working_dir output_telemetry.path_filter = path_filter def write_workflows(notebooks, output_telemetries=[]): # process notebooks for notebook in notebooks: # get notebook name output_telemetry = Telemetry() nb_path = Path(notebook) name, _ = os.path.splitext(nb_path.parts[-1]) # write workflow file write_notebook_workflow(notebook, name, output_telemetry) output_telemetry.notebook = nb_path output_telemetries.append(output_telemetry) def local_filter(callback, array): results = [] for index, item in enumerate(array): result = callback(item, index, array) # if returned true, append item to results if result: results.append(item) return results def no_readme_generation_filter(item, index, array) -> bool: """ Set each ipynb metadata no_readme_generation to "true" to skip readme generation """ try: if item.endswith("test.ipynb"): return False # read in notebook with open(item, "r", encoding="utf-8") as f: data = json.load(f) try: if data["metadata"]["no_readme_generation"] is not None: # no_readme_generate == "true", then no generation return data["metadata"]["no_readme_generation"] != "true" except Exception: return True # generate readme except Exception: return False # not generate readme def main(input_glob, output_files=[], check=False): # get list of workflows notebooks = _get_paths( [j for i in [glob.glob(p, recursive=True) for p in input_glob] for j in i] ) # check each workflow, get metadata. notebooks = local_filter(no_readme_generation_filter, notebooks) # format code if not check: format_ipynb(notebooks) # write workflows write_workflows(notebooks, output_files) # run functions if __name__ == "__main__": # setup argparse parser = argparse.ArgumentParser() parser.add_argument( "-g", "--input-glob", nargs="+", help="Input glob example 'examples/**/*.ipynb'" ) args = parser.parse_args() # call main main(input_glob=args.input_glob)
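The hash-based scheduling above deserves a worked example: each workflow name deterministically maps to a minute (0-59) and an hour in the 19-22 UTC window, spreading scheduled runs out to limit peak quota usage. A standalone sketch mirroring those lines (the workflow name is illustrative):

```python
import hashlib

workflow_name = "samples_flows_standard_basic"
name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
schedule_minute = name_hash % 60
schedule_hour = (name_hash // 60) % 4 + 19  # 19-22 UTC

# BJT = UTC+8, so hours 19-22 UTC are 3-6 BJT; hence the "-16" in the comment.
print(f"crontab: {schedule_minute} {schedule_hour} * * *")
print(f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT")
```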
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/readme.py
# Generate Readme file for the examples folder import json from pathlib import Path import workflow_generator import readme_generator from jinja2 import Environment, FileSystemLoader from ghactions_driver.readme_step import ReadmeStepsManage from operator import itemgetter import argparse import sys import os import re BRANCH = "main" def get_notebook_readme_description(notebook) -> str: """ Set each ipynb metadata description at .metadata.description """ try: # read in notebook with open(notebook, "r", encoding="utf-8") as f: data = json.load(f) return data["metadata"]["description"] except Exception: print(f"{notebook} metadata description not set") return "" def get_readme_description_first_sentence(readme) -> str: """ Get each readme first sentence of first paragraph """ try: with open(readme, "r", encoding="utf-8") as f: # read first line line = f.readline() sentence = "" while True: line = f.readline() if line.startswith("#"): line = "" # skip metadata section if line.startswith("---") or line.startswith("resources"): line = "" if line.strip() == "" and sentence != "": break elif "." in line: sentence += " " + line.split(".")[0].strip() break else: if sentence == "": sentence += line.strip() elif line.strip() != "": sentence += " " + line.strip() return sentence except Exception: print(f"Error during reading {readme}") return "" def write_readme(workflow_telemetries, readme_telemetries): global BRANCH ReadmeStepsManage.git_base_dir() readme_file = Path(ReadmeStepsManage.git_base_dir()) / "examples/README.md" quickstarts = { "readmes": [], "notebooks": [], } tutorials = { "readmes": [], "notebooks": [], } flows = { "readmes": [], "notebooks": [], } evaluations = { "readmes": [], "notebooks": [], } chats = { "readmes": [], "notebooks": [], } toolusecases = { "readmes": [], "notebooks": [], } connections = { "readmes": [], "notebooks": [], } for workflow_telemetry in workflow_telemetries: notebook_name = f"{workflow_telemetry.name}.ipynb" gh_working_dir = workflow_telemetry.gh_working_dir pipeline_name = workflow_telemetry.workflow_name yaml_name = f"{pipeline_name}.yml" # For workflows, open ipynb as raw json and # setup description at .metadata.description description = get_notebook_readme_description(workflow_telemetry.notebook) notebook_path = gh_working_dir.replace("examples/", "") + f"/{notebook_name}" if gh_working_dir.startswith("examples/flows/standard"): flows["notebooks"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif gh_working_dir.startswith("examples/connections"): connections["notebooks"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif gh_working_dir.startswith("examples/flows/evaluation"): evaluations["notebooks"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif gh_working_dir.startswith("examples/tutorials"): if "quickstart" in notebook_name: quickstarts["notebooks"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) else: tutorials["notebooks"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif gh_working_dir.startswith("examples/flows/chat"): chats["notebooks"].append( { "name": 
notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif gh_working_dir.startswith("examples/tools/use-cases"): toolusecases["notebooks"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) else: print(f"Unknown workflow type: {gh_working_dir}") # Adjust tutorial names: for readme_telemetry in readme_telemetries: if readme_telemetry.readme_name.endswith("README.md"): notebook_name = readme_telemetry.readme_folder.split("/")[-1] else: notebook_name = readme_telemetry.readme_name.split("/")[-1].replace( ".md", "" ) notebook_path = readme_telemetry.readme_name.replace("examples/", "") pipeline_name = readme_telemetry.workflow_name yaml_name = f"{readme_telemetry.workflow_name}.yml" description = get_readme_description_first_sentence( readme_telemetry.readme_name ) readme_folder = readme_telemetry.readme_folder if readme_folder.startswith("examples/flows/standard"): flows["readmes"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif readme_folder.startswith("examples/connections"): connections["readmes"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif readme_folder.startswith("examples/flows/evaluation"): evaluations["readmes"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif readme_folder.startswith("examples/tutorials"): if "quickstart" in notebook_name: quickstarts["readmes"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) else: tutorials["readmes"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif readme_folder.startswith("examples/flows/chat"): chats["readmes"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) elif readme_folder.startswith("examples/tools/use-cases"): toolusecases["readmes"].append( { "name": notebook_name, "path": notebook_path, "pipeline_name": pipeline_name, "yaml_name": yaml_name, "description": description, } ) else: print(f"Unknown workflow type: {readme_folder}") quickstarts["notebooks"] = sorted( quickstarts["notebooks"], key=itemgetter("name"), reverse=True, ) replacement = { "branch": BRANCH, "tutorials": tutorials, "flows": flows, "evaluations": evaluations, "chats": chats, "toolusecases": toolusecases, "connections": connections, "quickstarts": quickstarts, } print("writing README.md...") env = Environment( loader=FileSystemLoader( Path(ReadmeStepsManage.git_base_dir()) / "scripts/readme/ghactions_driver/readme_templates" ) ) template = env.get_template("README.md.jinja2") with open(readme_file, "w") as f: f.write(template.render(replacement)) print("finished writing README.md") def main(check): if check: # Disable print sys.stdout = open(os.devnull, "w") input_glob = ["examples/**/*.ipynb"] workflow_telemetry = [] workflow_generator.main(input_glob, workflow_telemetry, check=check) input_glob_readme = [ "examples/flows/**/README.md", "examples/connections/**/README.md", "examples/tutorials/e2e-development/*.md", 
"examples/tutorials/flow-fine-tuning-evaluation/*.md", "examples/tutorials/**/README.md", "examples/tools/use-cases/**/README.md", ] # exclude the readme since this is 3p integration folder, pipeline generation is not included input_glob_readme_exclude = ["examples/flows/integrations/**/README.md"] readme_telemetry = [] readme_generator.main( input_glob_readme, input_glob_readme_exclude, readme_telemetry ) write_readme(workflow_telemetry, readme_telemetry) if check: output_object = {} for workflow in workflow_telemetry: workflow_items = re.split(r"\[|,| |\]", workflow.path_filter) workflow_items = list(filter(None, workflow_items)) output_object[workflow.workflow_name] = [] for item in workflow_items: if item == "examples/*requirements.txt": output_object[workflow.workflow_name].append( "examples/requirements.txt" ) output_object[workflow.workflow_name].append( "examples/dev_requirements.txt" ) continue output_object[workflow.workflow_name].append(item) for readme in readme_telemetry: output_object[readme.workflow_name] = [] readme_items = re.split(r"\[|,| |\]", readme.path_filter) readme_items = list(filter(None, readme_items)) for item in readme_items: if item == "examples/*requirements.txt": output_object[readme.workflow_name].append( "examples/requirements.txt" ) output_object[readme.workflow_name].append( "examples/dev_requirements.txt" ) continue output_object[readme.workflow_name].append(item) # enable output sys.stdout = sys.__stdout__ return output_object else: return "" if __name__ == "__main__": # setup argparse parser = argparse.ArgumentParser() parser.add_argument( "-c", "--check", action="store_true", help="Check what file is affected" ) args = parser.parse_args() output = main(args.check) print(json.dumps(output))
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/telemetry_obj.py
class Telemetry(object):
    """Plain attribute bag used to pass generation results between the scripts.

    The generators assign arbitrary attributes (e.g. ``workflow_name``,
    ``readme_name``, ``path_filter``) onto instances at runtime.
    """

    pass
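A short sketch of how the generators use this class; the attribute names are the ones assigned in `readme_step.py` and `workflow_generator.py`, while the values are illustrative:

```python
from ghactions_driver.telemetry_obj import Telemetry  # as imported by the generator scripts

telemetry = Telemetry()
# Generators attach results as plain attributes...
telemetry.workflow_name = "samples_flows_standard_basic"
telemetry.path_filter = "[ examples/flows/standard/basic/**, .github/workflows/samples_flows_standard_basic.yml ]"
# ...and readme.py reads them back when assembling examples/README.md.
print(telemetry.workflow_name, telemetry.path_filter)
```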
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/readme_parse.py
import io
import re
from pathlib import Path

import panflute
import pypandoc

from .readme_step import ReadmeStepsManage


def strip_comments(code):
    code = str(code)
    code = re.sub(r"(?m)^ *#.*\n?", "", code)  # remove comments
    splits = [ll.rstrip() for ll in code.splitlines() if ll.strip()]  # remove empty lines
    # remove lines containing "interactive", "pf flow serve" or "pf connection delete"
    splits_no_interactive = [
        split
        for split in splits
        if "interactive" not in split
        and "pf flow serve" not in split
        and "pf connection delete" not in split
    ]
    text = "\n".join([ll.rstrip() for ll in splits_no_interactive])
    # replace placeholders with the environment variables used in CI
    text = text.replace("<your_api_key>", "$aoai_api_key")
    text = text.replace("<your_api_base>", "$aoai_api_endpoint")
    text = text.replace("<your_subscription_id>", "$test_workspace_sub_id")
    text = text.replace("<your_resource_group_name>", "$test_workspace_rg")
    text = text.replace("<your_workspace_name>", "$test_workspace_name")
    return text


def prepare(doc):
    doc.full_text = ""


def action(elem, doc):
    if isinstance(elem, panflute.CodeBlock) and "bash" in elem.classes:
        doc.full_text = "\n".join([doc.full_text, strip_comments(elem.text)])


def readme_parser(filename: str):
    real_filename = Path(ReadmeStepsManage.git_base_dir()) / filename
    data = pypandoc.convert_file(str(real_filename), "json")
    f = io.StringIO(data)
    doc = panflute.load(f)
    panflute.run_filter(action, prepare, doc=doc)
    return doc.full_text
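A worked example of the comment and placeholder handling above, as a standalone copy of the relevant lines so it runs without importing the module:

```python
import re

code = """
# create the connection
pf connection create -f azure_openai.yml --set api_key=<your_api_key>

pf flow serve --source ./my-flow --port 8080 --interactive
"""
code = re.sub(r"(?m)^ *#.*\n?", "", code)                # drop comment lines
lines = [ln.rstrip() for ln in code.splitlines() if ln.strip()]
lines = [ln for ln in lines if "interactive" not in ln]  # drop the interactive serve line
text = "\n".join(lines).replace("<your_api_key>", "$aoai_api_key")
print(text)
# -> pf connection create -f azure_openai.yml --set api_key=$aoai_api_key
```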
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/readme_step.py
import subprocess from pathlib import Path import hashlib from jinja2 import Environment, FileSystemLoader, Template from .telemetry_obj import Telemetry class Step: """ StepType in workflow """ Environment = None @staticmethod def init_jinja_loader() -> Environment: jinja_folder_path = ( Path(ReadmeStepsManage.git_base_dir()) / "scripts" / "readme" / "ghactions_driver" / "workflow_steps" ) Step.Environment = Environment( loader=FileSystemLoader(jinja_folder_path.resolve()) ) def __init__(self, name: str) -> None: self.workflow_name = name def get_workflow_step(self) -> str: # virtual method for override return "" @staticmethod def get_workflow_template(step_file_name: str) -> Template: # virtual method for override if Step.Environment is None: Step.init_jinja_loader() template = Step.Environment.get_template(step_file_name) return template class AzureLoginStep(Step): def __init__(self) -> None: Step.__init__(self, "Azure Login") def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_azure_login.yml.jinja2") return template.render( { "step_name": self.workflow_name, } ) class InstallDependenciesStep(Step): def __init__(self) -> None: Step.__init__(self, "Prepare requirements") def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_install_deps.yml.jinja2") return template.render( { "step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir, } ) class InstallDevDependenciesStep(Step): def __init__(self) -> None: Step.__init__(self, "Prepare dev requirements") def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_install_dev_deps.yml.jinja2") return template.render( { "step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir, } ) class CreateAoaiFromYaml(Step): def __init__(self, yaml_name: str) -> None: Step.__init__(self, "Create AOAI Connection from YAML") self.yaml_name = yaml_name def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_yml_create_aoai.yml.jinja2") return template.render( { "step_name": self.workflow_name, "yaml_name": self.yaml_name, } ) class ExtractStepsAndRun(Step): def __init__(self) -> None: Step.__init__(self, f"Extract Steps {ReadmeSteps.readme_name}") def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_extract_steps_and_run.yml.jinja2") return template.render( { "step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir, "readme_name": ReadmeSteps.readme_name, } ) class ExtractStepsAndRunGPTFour(Step): def __init__(self) -> None: Step.__init__(self, f"Extract Steps {ReadmeSteps.readme_name}") def get_workflow_step(self) -> str: template = Step.get_workflow_template( "step_extract_steps_and_run_gpt4.yml.jinja2" ) return template.render( { "step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir, "readme_name": ReadmeSteps.readme_name, } ) class CreateEnv(Step): def __init__(self) -> None: Step.__init__(self, "Refine .env file") def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_create_env.yml.jinja2") content = template.render( {"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir} ) return content class CreateEnvGPTFour(Step): def __init__(self) -> None: Step.__init__(self, "Refine .env file") def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_create_env_gpt4.yml.jinja2") content = template.render( {"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir} ) return content class 
CreateAoaiFromEnv(Step): def __init__(self, connection_name: str) -> None: Step.__init__(self, "Create AOAI Connection from ENV file") self.connection_name = connection_name def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_env_create_aoai.yml.jinja2") content = template.render( { "step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir, "connection_name": self.connection_name, } ) return content class CreateRunYaml(Step): def __init__(self) -> None: Step.__init__(self, "Create run.yml") def get_workflow_step(self) -> str: template = Step.get_workflow_template("step_create_run_yml.yml.jinja2") content = template.render( {"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir} ) return content class ReadmeSteps: """ Static class to record steps, to be filled in workflow templates and Readme """ step_array = [] # Record steps readme_name = "" # Record readme name working_dir = "" # the working directory of flow, relative to git_base_dir template = "" # Select a base template under workflow_templates folder workflow = "" # Target workflow name to be generated @staticmethod def remember_step(step: Step) -> Step: ReadmeSteps.step_array.append(step) return step @staticmethod def get_length() -> int: return len(ReadmeSteps.step_array) # region steps @staticmethod def create_env() -> Step: return ReadmeSteps.remember_step(CreateEnv()) @staticmethod def create_env_gpt4() -> Step: return ReadmeSteps.remember_step(CreateEnvGPTFour()) @staticmethod def yml_create_aoai(yaml_name: str) -> Step: return ReadmeSteps.remember_step(CreateAoaiFromYaml(yaml_name=yaml_name)) @staticmethod def env_create_aoai(connection_name: str) -> Step: return ReadmeSteps.remember_step( CreateAoaiFromEnv(connection_name=connection_name) ) @staticmethod def azure_login() -> Step: return ReadmeSteps.remember_step(AzureLoginStep()) @staticmethod def install_dependencies() -> Step: return ReadmeSteps.remember_step(InstallDependenciesStep()) @staticmethod def install_dev_dependencies() -> Step: return ReadmeSteps.remember_step(InstallDevDependenciesStep()) @staticmethod def create_run_yaml() -> Step: return ReadmeSteps.remember_step(CreateRunYaml()) @staticmethod def extract_steps_and_run() -> Step: return ReadmeSteps.remember_step(ExtractStepsAndRun()) @staticmethod def extract_steps_and_run_gpt_four() -> Step: return ReadmeSteps.remember_step(ExtractStepsAndRunGPTFour()) # endregion steps @staticmethod def setup_target( working_dir: str, template: str, target: str, readme_name: str ) -> str: """ Used at the very head of jinja template to indicate basic information """ ReadmeSteps.working_dir = working_dir ReadmeSteps.template = template ReadmeSteps.workflow = target ReadmeSteps.step_array = [] ReadmeSteps.readme_name = readme_name return "" @staticmethod def cleanup() -> None: ReadmeSteps.working_dir = "" ReadmeSteps.template = "" ReadmeSteps.workflow = "" ReadmeSteps.step_array = [] class ReadmeStepsManage: """ # Static methods for manage all readme steps """ repo_base_dir = "" @staticmethod def git_base_dir() -> str: """ Get the base directory of the git repo """ if ReadmeStepsManage.repo_base_dir == "": try: ReadmeStepsManage.repo_base_dir = ( subprocess.check_output(["git", "rev-parse", "--show-toplevel"]) .decode("utf-8") .strip() ) raise Exception("Not in git repo") except Exception: ReadmeStepsManage.repo_base_dir = Path(__file__).parent.parent.parent.parent.resolve() print(ReadmeStepsManage.repo_base_dir) return ReadmeStepsManage.repo_base_dir @staticmethod 
def write_workflow( workflow_name: str, pipeline_name: str, output_telemetry=Telemetry() ) -> None: # Schedule notebooks at different times to reduce maximum quota usage. name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16) schedule_minute = name_hash % 60 schedule_hour = (name_hash // 60) % 4 + 19 # 19-22 UTC if "tutorials" in workflow_name: # markdown filename has some exceptions, special handle here if "chat_with_pdf" in workflow_name: readme_name = "chat-with-pdf.md" elif ( "fine_tuning_evaluation_promptflow_quality_improvement" in workflow_name ): readme_name = "promptflow-quality-improvement.md" else: readme_name = "README.md" readme_path = ( Path(ReadmeStepsManage.git_base_dir()) / ReadmeSteps.working_dir / readme_name ) # local import to avoid circular import from .resource_resolver import resolve_tutorial_resource path_filter = resolve_tutorial_resource( workflow_name, readme_path.resolve() ) else: if ( "flow_with_additional_includes" in workflow_name or "flow_with_symlinks" in workflow_name ): # these two flows have dependencies on flow web-classification # so corresponding workflows should also listen to changes in web-classification path_filter = ( f"[ {ReadmeSteps.working_dir}/**, " + "examples/*requirements.txt, " + "examples/flows/standard/web-classification/**, " + f".github/workflows/{workflow_name}.yml ]" ) else: path_filter = ( f"[ {ReadmeSteps.working_dir}/**, " + "examples/*requirements.txt, " + f".github/workflows/{workflow_name}.yml ]" ) replacements = { "steps": ReadmeSteps.step_array, "workflow_name": workflow_name, "ci_name": pipeline_name, "path_filter": path_filter, "crontab": f"{schedule_minute} {schedule_hour} * * *", "crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT", } workflow_template_path = ( Path(ReadmeStepsManage.git_base_dir()) / "scripts" / "readme" / "ghactions_driver" / "workflow_templates" ) target_path = ( Path(ReadmeStepsManage.git_base_dir()) / ".github" / "workflows" / f"{workflow_name}.yml" ) template = Environment( loader=FileSystemLoader(workflow_template_path.resolve()) ).get_template(ReadmeSteps.template) content = template.render(replacements) with open(target_path.resolve(), "w", encoding="utf-8") as f: f.write(content) print(f"Write readme workflow: {target_path.resolve()}") output_telemetry.workflow_name = workflow_name output_telemetry.target_path = target_path output_telemetry.readme_folder = ReadmeSteps.working_dir output_telemetry.readme_name = ReadmeSteps.readme_name output_telemetry.path_filter = path_filter
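Extending the generator follows the pattern above: subclass `Step`, render a named template from `workflow_steps/`, and expose a `ReadmeSteps` factory that records it. A hedged sketch, where the step and its template file name are hypothetical:

```python
class CatLogsStep(Step):
    """Example custom step: dump flow logs for debugging (illustrative only)."""

    def __init__(self) -> None:
        Step.__init__(self, "Cat flow logs")

    def get_workflow_step(self) -> str:
        # 'step_cat_logs.yml.jinja2' would live in ghactions_driver/workflow_steps/
        template = Step.get_workflow_template("step_cat_logs.yml.jinja2")
        return template.render(
            {
                "step_name": self.workflow_name,
                "working_dir": ReadmeSteps.working_dir,
            }
        )

# Registering it would mirror the existing factories on ReadmeSteps:
#     @staticmethod
#     def cat_logs() -> Step:
#         return ReadmeSteps.remember_step(CatLogsStep())
```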
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/resource_resolver.py
from pathlib import Path
from typing import List

import markdown
import nbformat

from .readme_step import ReadmeStepsManage

RESOURCES_KEY_NAME = "resources"
RESOURCES_KEY_ERROR_MESSAGE = (
    "Please follow examples contributing guide to declare tutorial resources: "
    "https://github.com/microsoft/promptflow/blob/main/examples/CONTRIBUTING.md"
)


def _parse_resources_string_from_notebook(path: Path) -> str:
    with open(path, "r", encoding="utf-8") as f:
        nb = nbformat.read(f, as_version=4)
    if RESOURCES_KEY_NAME not in nb.metadata:
        raise Exception(RESOURCES_KEY_ERROR_MESSAGE)
    return nb.metadata[RESOURCES_KEY_NAME]


def _parse_resources_string_from_markdown(path: Path) -> str:
    markdown_content = path.read_text(encoding="utf-8")
    md = markdown.Markdown(extensions=["meta"])
    md.convert(markdown_content)
    if RESOURCES_KEY_NAME not in md.Meta:
        raise Exception(RESOURCES_KEY_ERROR_MESSAGE)
    return md.Meta[RESOURCES_KEY_NAME][0]


def _parse_resources(path: Path) -> List[str]:
    if path.suffix == ".ipynb":
        resources_string = _parse_resources_string_from_notebook(path)
    elif path.suffix == ".md":
        resources_string = _parse_resources_string_from_markdown(path)
    else:
        raise Exception(f"Unknown file type: {path.suffix!r}")
    return [resource.strip() for resource in resources_string.split(",")]


def resolve_tutorial_resource(workflow_name: str, resource_path: Path) -> str:
    """Resolve tutorial resources, so that workflow can be triggered more precisely.

    A tutorial workflow should listen to changes of:
    1. working directory
    2. resources declared in notebook/markdown metadata
    3. workflow file
    4. examples/requirements.txt (for release verification)
    5. examples/connections/azure_openai.yml (fallback, as it is the most basic and common connection)
    """
    # working directory
    git_base_dir = Path(ReadmeStepsManage.git_base_dir())
    working_dir = resource_path.parent.relative_to(git_base_dir).as_posix()
    path_filter_list = [f"{working_dir}/**"]
    # resources declared in text file
    resources = _parse_resources(resource_path)
    for resource in resources:
        # skip empty entries
        if len(resource) == 0:
            continue
        # validate that the resource path exists
        resolved_path = (git_base_dir / resource).resolve()
        if not resolved_path.exists():
            raise FileNotFoundError(
                "Please declare tutorial resource paths relative to the git repo root."
            )
        elif resolved_path.is_file():
            path_filter_list.append(resource)
        else:
            path_filter_list.append(f"{resource}/**")
    # workflow file
    path_filter_list.append(f".github/workflows/{workflow_name}.yml")
    # manually add examples/requirements.txt if not already declared
    examples_req = "examples/requirements.txt"
    if examples_req not in path_filter_list:
        path_filter_list.append(examples_req)
    # manually add examples/connections/azure_openai.yml if not already declared
    aoai_conn = "examples/connections/azure_openai.yml"
    if aoai_conn not in path_filter_list:
        path_filter_list.append(aoai_conn)
    return "[ " + ", ".join(path_filter_list) + " ]"
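To make the returned filter concrete, a hedged worked example: a hypothetical notebook at `examples/tutorials/get-started/quickstart.ipynb` whose metadata declares `resources: examples/flows/standard/web-classification, examples/requirements.txt` would resolve to roughly:

```python
# Illustrative output only (not from a real run); the order follows the code above:
# working dir, declared resources, workflow file, then the two fallback entries.
expected = (
    "[ examples/tutorials/get-started/**, "
    "examples/flows/standard/web-classification/**, "
    "examples/requirements.txt, "
    ".github/workflows/samples_getstarted_quickstart.yml, "
    "examples/connections/azure_openai.yml ]"
)
print(expected)
```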
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/readme_workflow_generate.py
from pathlib import Path

from .readme_step import ReadmeStepsManage, ReadmeSteps
from .telemetry_obj import Telemetry


def write_readme_workflow(readme_path, output_telemetry=Telemetry()):
    relative_path = Path(readme_path).relative_to(
        Path(ReadmeStepsManage.git_base_dir())
    )
    workflow_path = relative_path.parent.as_posix()
    relative_name_path = Path(readme_path).relative_to(
        Path(ReadmeStepsManage.git_base_dir()) / "examples"
    )
    workflow_name = (
        relative_name_path.as_posix()
        .replace(".md", "")
        .replace("/README", "")
        .replace("/", "_")
        .replace("-", "_")
    )
    workflow_name = "samples_" + workflow_name

    ReadmeSteps.setup_target(
        working_dir=workflow_path,
        template="basic_workflow_replace_config_json.yml.jinja2"
        if "e2e_development_chat_with_pdf" in workflow_name
        else "basic_workflow_replace.yml.jinja2",
        target=f"{workflow_name}.yml",
        readme_name=relative_path.as_posix(),
    )
    ReadmeSteps.install_dependencies()
    ReadmeSteps.install_dev_dependencies()
    if workflow_name.endswith("flows_chat_chat_with_image") or workflow_name.endswith(
        "flows_standard_describe_image"
    ):
        ReadmeSteps.create_env_gpt4()
        ReadmeSteps.env_create_aoai("aoai_gpt4v_connection")
    else:
        ReadmeSteps.create_env()
        if workflow_name.endswith("pdf"):
            ReadmeSteps.env_create_aoai("chat_with_pdf_custom_connection")
    ReadmeSteps.create_run_yaml()
    if (
        workflow_name.endswith("flows_standard_basic_with_builtin_llm")
        or workflow_name.endswith("flows_standard_flow_with_symlinks")
        or workflow_name.endswith("flows_standard_flow_with_additional_includes")
        or workflow_name.endswith("flows_standard_basic_with_connection")
    ):
        ReadmeSteps.yml_create_aoai("examples/connections/azure_openai.yml")
    ReadmeSteps.azure_login()
    if workflow_name.endswith("flows_chat_chat_with_image") or workflow_name.endswith(
        "flows_standard_describe_image"
    ):
        ReadmeSteps.extract_steps_and_run_gpt_four()
    else:
        ReadmeSteps.extract_steps_and_run()
    ReadmeStepsManage.write_workflow(workflow_name, "samples_readme_ci", output_telemetry)
    ReadmeSteps.cleanup()
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/bash_script/bash_script.sh.jinja2
#!/usr/bin/env bash
set -xe
{{ command }}
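A minimal sketch of how this template is rendered, mirroring `extract_steps_from_readme.py` above but with the template inlined so it runs standalone (the command string is illustrative):

```python
from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({
    "bash_script.sh.jinja2": "#!/usr/bin/env bash\nset -xe\n{{ command }}"
}))
template = env.get_template("bash_script.sh.jinja2")
print(template.render({"command": "pf flow test --flow ./my-flow"}))
# #!/usr/bin/env bash
# set -xe
# pf flow test --flow ./my-flow
```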
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/readme_templates/README.md.jinja2
# Promptflow examples [![code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) [![license: MIT](https://img.shields.io/badge/License-MIT-purple.svg)](../LICENSE) ## Get started **Install dependencies** - Bootstrap your python environment. - e.g: create a new [conda](https://conda.io/projects/conda/en/latest/user-guide/getting-started.html) environment. `conda create -n pf-examples python=3.9`. - install required packages in python environment : `pip install -r requirements.txt` - show installed sdk: `pip show promptflow` **Quick start** | path | status | description | ------|--------|------------- {% for quickstart in quickstarts.notebooks %}| [{{ quickstart.name }}]({{ quickstart.path }}) | [![{{quickstart.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{quickstart.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{quickstart.yaml_name}}) | {{ quickstart.description }} | {% endfor %} ## CLI examples ### Tutorials ([tutorials](tutorials)) | path | status | description | ------|--------|------------- {% for tutorial in tutorials.readmes %}| [{{ tutorial.name }}]({{ tutorial.path }}) | [![{{tutorial.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}) | {{ tutorial.description }} | {% endfor %} ### Flows ([flows](flows)) #### [Standard flows](flows/standard/) | path | status | description | ------|--------|------------- {% for flow in flows.readmes %}| [{{ flow.name }}]({{ flow.path }}) | [![{{flow.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}) | {{ flow.description }} | {% endfor %} #### [Evaluation flows](flows/evaluation/) | path | status | description | ------|--------|------------- {% for evaluation in evaluations.readmes %}| [{{ evaluation.name }}]({{ evaluation.path }}) | [![{{evaluation.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}) | {{ evaluation.description }} | {% endfor %} #### [Chat flows](flows/chat/) | path | status | description | ------|--------|------------- {% for chat in chats.readmes %}| [{{ chat.name }}]({{ chat.path }}) | [![{{chat.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}) | {{ chat.description }} | {% endfor %} ### Tool Use Cases ([Tool Use Cases](tools/use-cases)) | path | status | description | ------|--------|------------- {% for toolusecase in toolusecases.readmes %}| [{{ toolusecase.name }}]({{ toolusecase.path }}) | [![{{toolusecase.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{toolusecase.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{toolusecase.yaml_name}}) | {{ toolusecase.description }} | {% endfor %} ### Connections ([connections](connections)) | path | status | description | ------|--------|------------- {% for connection in connections.readmes %}| [{{ connection.name }}]({{ connection.path }}) | 
[![{{connection.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}) | {{ connection.description }} | {% endfor %} ## SDK examples | path | status | description | ------|--------|------------- {% for quickstart in quickstarts.notebooks %}| [{{ quickstart.name }}]({{ quickstart.path }}) | [![{{quickstart.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{quickstart.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{quickstart.yaml_name}}) | {{ quickstart.description }} | {% endfor %} {%- for tutorial in tutorials.notebooks -%}| [{{ tutorial.name }}]({{ tutorial.path }}) | [![{{tutorial.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}) | {{ tutorial.description }} | {% endfor %} {%- if connections.notebooks|length > 0 -%}{% for connection in connections.notebooks %}| [{{ connection.name }}]({{ connection.path }}) | [![{{connection.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}) | {{ connection.description }} | {% endfor %}{% endif %} {%- if chats.notebooks|length > 0 -%}{% for chat in chats.notebooks %}| [{{ chat.name }}]({{ chat.path }}) | [![{{chat.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}) | {{ chat.description }} | {% endfor %}{% endif %} {%- if evaluations.notebooks|length > 0 -%}{% for evaluation in evaluations.notebooks %}| [{{ evaluation.name }}]({{ evaluation.path }}) | [![{{evaluation.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}) | {{ evaluation.description }} | {% endfor %}{% endif %} {%- if flows.notebooks|length > 0 -%}{% for flow in flows.notebooks %}| [{{ flow.name }}]({{ flow.path }}) | [![{{flow.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}) | {{ flow.description }} | {% endfor %}{% endif %} ## Contributing We welcome contributions and suggestions! Please see the [contributing guidelines](../CONTRIBUTING.md) for details. ## Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Please see the [code of conduct](../CODE_OF_CONDUCT.md) for details. ## Reference * [Promptflow documentation](https://microsoft.github.io/promptflow/)
0
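The template above is mostly badge-table plumbing. As a minimal sketch of how one of its table rows is produced, the snippet below renders a single quickstart row with the `jinja2` package; the field names mirror the template variables, while the sample entry itself is invented (the real metadata is collected by the readme generation script).

```python
from jinja2 import Template

# One row of the "Quick start" table from the README template above.
row_template = Template(
    "| [{{ q.name }}]({{ q.path }}) "
    "| [![{{ q.pipeline_name }}](https://github.com/microsoft/promptflow/actions"
    "/workflows/{{ q.yaml_name }}/badge.svg?branch={{ branch }})]"
    "(https://github.com/microsoft/promptflow/actions/workflows/{{ q.yaml_name }}) "
    "| {{ q.description }} |"
)

quickstart = {  # invented sample entry, for illustration only
    "name": "quickstart",
    "path": "tutorials/get-started/quickstart.ipynb",
    "pipeline_name": "samples_getstarted_quickstart",
    "yaml_name": "samples_getstarted_quickstart.yml",
    "description": "A quickstart tutorial to run a flow and evaluate it.",
}
print(row_template.render(q=quickstart, branch="main"))
```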
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_extract_steps_and_run.yml.jinja2
- name: {{ step_name }}
  working-directory: ${{ '{{' }} github.workspace }}
  run: |
    python scripts/readme/extract_steps_from_readme.py -f {{ readme_name }} -o {{ working_dir }}
- name: Cat script
  working-directory: {{ working_dir }}
  run: |
    cat bash_script.sh
- name: Run scripts against canary workspace (scheduled runs only)
  if: github.event_name == 'schedule'
  working-directory: {{ working_dir }}
  run: |
    export aoai_api_key=${{ '{{' }} secrets.AOAI_API_KEY_TEST }}
    export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
    export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }}
    export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }}
    export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_CANARY }}
    bash bash_script.sh
- name: Run scripts against production workspace
  if: github.event_name != 'schedule'
  working-directory: {{ working_dir }}
  run: |
    export aoai_api_key=${{ '{{' }} secrets.AOAI_API_KEY_TEST }}
    export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
    export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }}
    export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }}
    export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_PROD }}
    bash bash_script.sh
- name: Pip List for Debug
  if: ${{ '{{' }} always() }}
  working-directory: {{ working_dir }}
  run: |
    pip list
- name: Upload artifact
  if: ${{ '{{' }} always() }}
  uses: actions/upload-artifact@v3
  with:
    name: artifact
    path: {{ working_dir }}/bash_script.sh
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_create_env.yml.jinja2
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }}
    AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
    AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
    if [[ -e .env.example ]]; then
      echo "env replacement"
      sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
      mv .env.example .env
    fi
0
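One detail worth calling out in the step above: the `${AOAI_API_ENDPOINT//\//\\/}` expansion exists only to escape the `/` characters in the endpoint URL so they do not terminate sed's `s/.../.../` expression. A minimal Python sketch of the same `.env.example` rewrite, which needs no such escaping (the placeholder values are invented):

```python
from pathlib import Path

def materialize_env(workdir: str, api_key: str, endpoint: str) -> None:
    """Replace the AOAI placeholders in .env.example and rename it to .env."""
    example = Path(workdir) / ".env.example"
    if example.exists():
        text = example.read_text()
        text = text.replace("<your_AOAI_key>", api_key)
        text = text.replace("<your_AOAI_endpoint>", endpoint)
        (Path(workdir) / ".env").write_text(text)
        example.unlink()  # mirrors `mv .env.example .env`

# Dummy values, for illustration only.
materialize_env(".", "sk-dummy", "https://example.openai.azure.com/")
```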
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_azure_login.yml.jinja2
- name: {{ step_name }}
  uses: azure/login@v1
  with:
    creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_yml_create_aoai.yml.jinja2
- name: {{ step_name }}
  working-directory: ${{ '{{' }} github.workspace }}
  run: pf connection create --file {{ yaml_name }} --set api_key=${{ '{{' }} secrets.AOAI_API_KEY_TEST }} api_base=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_extract_steps_and_run_gpt4.yml.jinja2
- name: {{ step_name }}
  working-directory: ${{ '{{' }} github.workspace }}
  run: |
    python scripts/readme/extract_steps_from_readme.py -f {{ readme_name }} -o {{ working_dir }}
- name: Cat script
  working-directory: {{ working_dir }}
  run: |
    cat bash_script.sh
- name: Run scripts against canary workspace (scheduled runs only)
  if: github.event_name == 'schedule'
  working-directory: {{ working_dir }}
  run: |
    export aoai_api_key=${{ '{{' }} secrets.AOAI_GPT_4V_KEY }}
    export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_GPT_4V_ENDPOINT }}
    export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }}
    export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }}
    export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_CANARY }}
    bash bash_script.sh
- name: Run scripts against production workspace
  if: github.event_name != 'schedule'
  working-directory: {{ working_dir }}
  run: |
    export aoai_api_key=${{ '{{' }} secrets.AOAI_GPT_4V_KEY }}
    export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_GPT_4V_ENDPOINT }}
    export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }}
    export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }}
    export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_PROD }}
    bash bash_script.sh
- name: Pip List for Debug
  if: ${{ '{{' }} always() }}
  working-directory: {{ working_dir }}
  run: |
    pip list
- name: Upload artifact
  if: ${{ '{{' }} always() }}
  uses: actions/upload-artifact@v3
  with:
    name: artifact
    path: {{ working_dir }}/bash_script.sh
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_install_dev_deps.yml.jinja2
- name: {{ step_name }}
  working-directory: examples
  run: |
    python -m pip install --upgrade pip
    pip install -r dev_requirements.txt
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_install_deps.yml.jinja2
- name: {{ step_name }}
  working-directory: examples
  run: |
    if [[ -e requirements.txt ]]; then
      python -m pip install --upgrade pip
      pip install -r requirements.txt
    fi
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_env_create_aoai.yml.jinja2
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    if [[ -e .env ]]; then
      pf connection create --file .env --name {{ connection_name }}
    fi
    if [[ -e azure_openai.yml ]]; then
      pf connection create --file azure_openai.yml --name {{ connection_name }}
    fi
    pf connection list
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_create_env_gpt4.yml.jinja2
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    AOAI_API_KEY=${{ '{{' }} secrets.AOAI_GPT_4V_KEY }}
    AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_GPT_4V_ENDPOINT }}
    AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
    cp ../../../connections/azure_openai.yml ./azure_openai.yml
    sed -i -e "s/<user-input>/$AOAI_API_KEY/g" -e "s/aoai-api-endpoint/$AOAI_API_ENDPOINT/g" azure_openai.yml
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_create_run_yml.yml.jinja2
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    gpt_base=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
    gpt_base=$(echo ${gpt_base//\//\\/})
    if [[ -e run.yml ]]; then
      sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ '{{' }} secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml
    fi
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/flow_as_function.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Azure Login uses: azure/login@v1 with: creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Prepare requirements run: | python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt - name: Create Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" - name: Create new Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" name=new_ai_connection - name: Test Notebook working-directory: {{ gh_working_dir }} run: | papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb -p api_key ${{ '{{' }} secrets.AOAI_API_KEY_TEST }} -p api_base ${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} -p api_version 2023-07-01-preview - name: Upload artifact if: ${{ '{{' }} always() }} uses: actions/upload-artifact@v3 with: name: artifact path: {{ gh_working_dir }} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/workflow_skeleton.yml.jinja2
# This code is autogenerated.
# Code is generated by running custom script: python3 readme.py
# Any manual changes to this file may cause incorrect behavior.
# Any manual changes will be overwritten if the code is regenerated.

name: {{ workflow_name }}
on:
  schedule:
    - cron: "{{ crontab }}" # {{ crontab_comment }}
  pull_request:
    branches: [ main ]
    paths: {{ path_filter }}
  workflow_dispatch:
env:
  IS_IN_CI_PIPELINE: "true"
jobs:
  {{ workflow_name }}:
{%- filter indent(width=4) -%}
{% block steps %}
{% endblock steps %}
{%- endfilter -%}
0
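Every concrete workflow template in this directory starts with `{% extends "workflow_skeleton.yml.jinja2" %}` and fills the `steps` block of the skeleton above; the `${{ '{{' }}` idiom emits a literal `${{` so GitHub Actions expressions survive Jinja2 rendering. A minimal, self-contained sketch of that mechanism, with both templates inlined and heavily trimmed for illustration:

```python
from jinja2 import DictLoader, Environment

templates = {
    # Trimmed stand-in for workflow_skeleton.yml.jinja2.
    "workflow_skeleton.yml.jinja2": (
        "name: {{ workflow_name }}\n"
        "jobs:\n"
        "  {{ workflow_name }}:\n"
        "{% block steps %}{% endblock %}"
    ),
    # Trimmed stand-in for a concrete template that overrides the steps block.
    "child.yml.jinja2": (
        '{% extends "workflow_skeleton.yml.jinja2" %}\n'
        "{% block steps %}"
        "    runs-on: ubuntu-latest\n"
        "    steps:\n"
        "      - run: echo ${{ '{{' }} github.sha }}\n"
        "{% endblock %}"
    ),
}

env = Environment(loader=DictLoader(templates))
# Renders a workflow whose `${{ github.sha }}` expression is left intact.
print(env.get_template("child.yml.jinja2").render(workflow_name="samples_demo"))
```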
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/pdf_workflow.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Generate config.json for canary workspace (scheduled runs only) if: github.event_name == 'schedule' run: echo '${{ '{{' }} secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Generate config.json for production workspace if: github.event_name != 'schedule' run: echo '${{ '{{' }} secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Prepare sample requirements working-directory: {{ gh_working_dir }} run: | python -m pip install --upgrade pip pip install -r requirements.txt - name: Prepare requirements run: | python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt - name: Create Chat With PDF Custom Connection working-directory: {{ gh_working_dir }} run: | AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }} AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) if [[ -e .env.example ]]; then echo "env replacement" sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env pf connection create --file .env --name chat_with_pdf_custom_connection fi - name: Create AOAI Connection working-directory: examples/connections run: | AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }} AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} if [[ -e azure_openai.yml ]]; then pf connection create --file azure_openai.yml --set api_key=$AOAI_API_KEY api_base=$AOAI_API_ENDPOINT fi - name: Azure Login uses: azure/login@v1 with: creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} - name: Test Notebook working-directory: {{ gh_working_dir }} run: | papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb - name: Upload artifact if: ${{ '{{' }} always() }} uses: actions/upload-artifact@v3 with: name: artifact path: {{ gh_working_dir }} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/workflow_config_json.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Generate config.json for canary workspace (scheduled runs only) if: github.event_name == 'schedule' run: echo '${{ '{{' }} secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Generate config.json for production workspace if: github.event_name != 'schedule' run: echo '${{ '{{' }} secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Azure Login uses: azure/login@v1 with: creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Prepare requirements run: | python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt - name: Create Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" - name: Test Notebook working-directory: {{ gh_working_dir }} run: | papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb - name: Upload artifact if: ${{ '{{' }} always() }} uses: actions/upload-artifact@v3 with: name: artifact path: {{ gh_working_dir }} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/basic_workflow_replace.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" {%- filter indent(width=2) -%} {% for step in steps %} {{ step.get_workflow_step() }}{% endfor %} {%- endfilter -%} {% endblock steps %}
0
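Unlike the fixed templates, this one splices pre-rendered YAML fragments in through `step.get_workflow_step()`. Below is a hypothetical sketch of what such a step object could look like; the real step classes live elsewhere in the `ghactions_driver` package and may differ in shape, so treat the class name and fields as assumptions:

```python
from jinja2 import Template

class InstallDepsStep:
    """Hypothetical stand-in for the step objects the template iterates over."""

    def __init__(self, step_name: str, working_dir: str) -> None:
        self.step_name = step_name
        self.working_dir = working_dir

    def get_workflow_step(self) -> str:
        # Each step renders its own YAML fragment, mirroring the
        # workflow_steps/*.yml.jinja2 files in this directory.
        tpl = Template(
            "- name: {{ step_name }}\n"
            "  working-directory: {{ working_dir }}\n"
            "  run: |\n"
            "    pip install -r requirements.txt"
        )
        return tpl.render(step_name=self.step_name, working_dir=self.working_dir)

print(InstallDepsStep("Prepare requirements", "examples/flows/standard").get_workflow_step())
```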
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/basic_workflow.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Azure Login uses: azure/login@v1 with: creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Prepare requirements run: | python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt - name: Create Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" - name: Test Notebook working-directory: {{ gh_working_dir }} run: | papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb - name: Upload artifact if: ${{ '{{' }} always() }} uses: actions/upload-artifact@v3 with: name: artifact path: {{ gh_working_dir }} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/basic_workflow_replace_config_json.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Generate config.json for canary workspace (scheduled runs only) if: github.event_name == 'schedule' run: echo '${{ '{{' }} secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Generate config.json for production workspace if: github.event_name != 'schedule' run: echo '${{ '{{' }} secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ '{{' }} github.workspace }}/examples/config.json {%- filter indent(width=2) -%} {% for step in steps %} {{ step.get_workflow_step() }}{% endfor %} {%- endfilter -%} {% endblock steps %}
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/json_schema/gen_json_schema.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # flake8: noqa # This file is part of scripts\generate_json_schema.py in sdk-cli-v2, which is used to generate json schema # To use this script, run `python <this_file>` in promptflow env, # and the json schema will be generated in the same folder. from inspect import isclass import json from azure.ai.ml._schema import ExperimentalField from promptflow._sdk.schemas._base import YamlFileSchema from promptflow._sdk.schemas._fields import UnionField from marshmallow import Schema, fields, missing from marshmallow.class_registry import get_class from marshmallow_jsonschema import JSONSchema class PatchedJSONSchema(JSONSchema): required = fields.Method("get_required") properties = fields.Method("get_properties") def __init__(self, *args, **kwargs): """Setup internal cache of nested fields, to prevent recursion. :param bool props_ordered: if `True` order of properties will be save as declare in class, else will using sorting, default is `False`. Note: For the marshmallow scheme, also need to enable ordering of fields too (via `class Meta`, attribute `ordered`). """ self._nested_schema_classes = {} self.nested = kwargs.pop("nested", False) self.props_ordered = kwargs.pop("props_ordered", False) setattr(self.opts, "ordered", self.props_ordered) super().__init__(*args, **kwargs) # cspell: ignore pytype def _from_python_type(self, obj, field, pytype): metadata = field.metadata.get("metadata", {}) metadata.update(field.metadata) # This is in the upcoming release of marshmallow-jsonschema, but not available yet if isinstance(field, fields.Dict): values = metadata.get("values", None) or field.value_field json_schema = {"title": field.attribute or field.data_key or field.name} json_schema["type"] = "object" if values: values.parent = field json_schema["additionalProperties"] = self._get_schema_for_field(obj, values) if values else {} return json_schema if isinstance(field, fields.Raw): json_schema = {"title": field.attribute or field.data_key or field.name} return json_schema return super()._from_python_type(obj, field, pytype) def _get_schema_for_field(self, obj, field): """Get schema and validators for field.""" if hasattr(field, "_jsonschema_type_mapping"): schema = field._jsonschema_type_mapping() # pylint: disable=protected-access elif "_jsonschema_type_mapping" in field.metadata: schema = field.metadata["_jsonschema_type_mapping"] else: if isinstance(field, UnionField): schema = self._get_schema_for_union_field(obj, field) elif isinstance(field, ExperimentalField): schema = self._get_schema_for_field(obj, field.experimental_field) elif isinstance(field, fields.Constant): schema = {"const": field.constant} else: schema = super()._get_schema_for_field(obj, field) if field.data_key: schema["title"] = field.data_key return schema def _get_schema_for_union_field(self, obj, field): has_yaml_option = False schemas = [] for field_item in field._union_fields: # pylint: disable=protected-access if isinstance(field_item, fields.Nested) and isinstance(field_item.schema, YamlFileSchema): has_yaml_option = True schemas.append(self._get_schema_for_field(obj, field_item)) if has_yaml_option: schemas.append({"type": "string", "pattern": "^file:.*"}) if field.allow_none: schemas.append({"type": "null"}) if field.is_strict: schema = {"oneOf": schemas} else: schema = {"anyOf": schemas} # This happens in the super() call to get_schema, doing 
here to allow for adding # descriptions and other schema attributes from marshmallow metadata metadata = field.metadata.get("metadata", {}) for md_key, md_val in metadata.items(): if md_key in ("metadata", "name"): continue schema[md_key] = md_val return schema def _from_nested_schema(self, obj, field): """patch in context for nested field""" if isinstance(field.nested, (str, bytes)): nested = get_class(field.nested) else: nested = field.nested if isclass(nested) and issubclass(nested, Schema): only = field.only exclude = field.exclude context = getattr(field.parent, "context", {}) field.nested = nested(only=only, exclude=exclude, context=context) return super()._from_nested_schema(obj, field) def get_properties(self, obj): """Fill out properties field.""" properties = self.dict_class() if self.props_ordered: fields_items_sequence = obj.fields.items() else: fields_items_sequence = sorted(obj.fields.items()) for _, field in fields_items_sequence: schema = self._get_schema_for_field(obj, field) properties[field.metadata.get("name") or field.data_key or field.name] = schema return properties def get_required(self, obj): """Fill out required field.""" required = [] for _, field in sorted(obj.fields.items()): if field.required: required.append(field.metadata.get("name") or field.data_key or field.name) return required or missing from promptflow._sdk.schemas._connection import AzureOpenAIConnectionSchema, OpenAIConnectionSchema, \ QdrantConnectionSchema, CognitiveSearchConnectionSchema, SerpConnectionSchema, AzureContentSafetyConnectionSchema, \ FormRecognizerConnectionSchema, CustomConnectionSchema, WeaviateConnectionSchema from promptflow._sdk.schemas._run import RunSchema from promptflow._sdk.schemas._flow import FlowSchema, EagerFlowSchema if __name__ == "__main__": cls_list = [FlowSchema, EagerFlowSchema] schema_list = [] for cls in cls_list: target_schema = PatchedJSONSchema().dump(cls(context={"base_path": "./"})) # print(target_schema) file_name = cls.__name__ file_name = file_name.replace("Schema", "") schema_list.append(target_schema["definitions"][cls.__name__]) print(target_schema) schema = { "type": "object", "oneOf": schema_list } with open((f"Flow.schema.json"), "w") as f: f.write(json.dumps(schema, indent=4))
0
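Stripped of the promptflow-specific patching above, the core flow in `gen_json_schema.py` is: marshmallow schema in, JSON Schema out. A minimal, self-contained sketch with a toy schema (the class and its fields are invented; the real inputs are `FlowSchema` and `EagerFlowSchema`):

```python
import json

from marshmallow import Schema, fields
from marshmallow_jsonschema import JSONSchema

class DemoFlowSchema(Schema):
    """Toy schema standing in for the promptflow schemas."""
    display_name = fields.Str()
    entry = fields.Str(required=True)

# JSONSchema().dump() returns a dict with a "definitions" section keyed by
# the schema class name, which is what the script above picks apart.
dumped = JSONSchema().dump(DemoFlowSchema())
print(json.dumps(dumped["definitions"]["DemoFlowSchema"], indent=4))
```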
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/json_schema/EagerFlow.schema.json
{ "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { "EagerFlowSchema": { "properties": { "additional_includes": { "title": "additional_includes", "type": "array", "items": { "title": "additional_includes", "type": "string" } }, "description": { "title": "description", "type": "string" }, "display_name": { "title": "display_name", "type": "string" }, "entry": { "title": "entry", "type": "string" }, "environment": { "title": "environment", "type": "object", "additionalProperties": {} }, "language": { "title": "language", "type": "string" }, "path": { "title": "path", "type": "string" }, "$schema": { "title": "$schema", "type": "string", "readOnly": true }, "tags": { "title": "tags", "type": "object", "additionalProperties": { "title": "tags", "type": "string" } }, "type": { "title": "type", "type": "string", "enum": [ "standard", "evaluation", "chat" ], "enumNames": [] } }, "type": "object", "required": [ "entry", "path" ], "additionalProperties": false } }, "$ref": "#/definitions/EagerFlowSchema" }
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/json_schema/Flow.schema.json
{ "type": "object", "oneOf": [ { "properties": { "additional_includes": { "title": "additional_includes", "type": "array", "items": { "title": "additional_includes", "type": "string" } }, "description": { "title": "description", "type": "string" }, "display_name": { "title": "display_name", "type": "string" }, "environment": { "title": "environment", "type": "object", "additionalProperties": {} }, "inputs": { "title": "inputs", "type": "object", "additionalProperties": { "type": "object", "$ref": "#/definitions/FlowInputSchema" } }, "language": { "title": "language", "type": "string" }, "node_variants": { "title": "node_variants", "type": "object", "additionalProperties": { "title": "node_variants", "type": "object", "additionalProperties": {} } }, "nodes": { "title": "nodes", "type": "array", "items": { "title": "nodes", "type": "object", "additionalProperties": {} } }, "outputs": { "title": "outputs", "type": "object", "additionalProperties": { "type": "object", "$ref": "#/definitions/FlowOutputSchema" } }, "$schema": { "title": "$schema", "type": "string", "readOnly": true }, "tags": { "title": "tags", "type": "object", "additionalProperties": { "title": "tags", "type": "string" } }, "type": { "title": "type", "type": "string", "enum": [ "standard", "evaluation", "chat" ], "enumNames": [] } }, "type": "object", "additionalProperties": false }, { "properties": { "additional_includes": { "title": "additional_includes", "type": "array", "items": { "title": "additional_includes", "type": "string" } }, "description": { "title": "description", "type": "string" }, "display_name": { "title": "display_name", "type": "string" }, "entry": { "title": "entry", "type": "string" }, "environment": { "title": "environment", "type": "object", "additionalProperties": {} }, "language": { "title": "language", "type": "string" }, "path": { "title": "path", "type": "string" }, "$schema": { "title": "$schema", "type": "string", "readOnly": true }, "tags": { "title": "tags", "type": "object", "additionalProperties": { "title": "tags", "type": "string" } }, "type": { "title": "type", "type": "string", "enum": [ "standard", "evaluation", "chat" ], "enumNames": [] } }, "type": "object", "required": [ "entry", "path" ], "additionalProperties": false } ] }
0
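The two `oneOf` branches above are mutually exclusive: the DAG branch sets `additionalProperties: false` and defines no `entry` property, while the eager branch requires both `entry` and `path`, so a document can match exactly one branch. A small validation sketch with the `jsonschema` package (the instances are invented; note the `$ref`s to `FlowInputSchema`/`FlowOutputSchema` resolve only if a `definitions` section is supplied, which these instances never touch):

```python
import json
from pathlib import Path

from jsonschema import ValidationError, validate

schema = json.loads(Path("Flow.schema.json").read_text())

# Matches the eager-flow branch: `entry` and `path` are both present.
eager_flow = {"entry": "my_module:my_func", "path": "my_module.py", "type": "standard"}
validate(instance=eager_flow, schema=schema)

# Fails both branches: the DAG branch forbids `entry`, and the eager branch
# additionally requires `path`.
try:
    validate(instance={"entry": "my_module:my_func"}, schema=schema)
except ValidationError as e:
    print("rejected:", e.message)
```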
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/building/release-env.yml
name: release-env
channels:
  - defaults
  - conda-forge
dependencies:
  - pip
  - pip:
      - setuptools
      - twine==4.0.0
      - portalocker~=1.2
      - setuptools_rust
      - pytest
      - pytest-xdist
      - pytest-sugar
      - pytest-timeout
      - azure-keyvault
      - azure-identity
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/building/dev_setup.py
import argparse
from pathlib import Path
from platform import system

from utils import print_blue, run_command


def setup_promptflow(extra_deps: list, command_args: dict) -> None:
    print_blue("- Setting up the promptflow SDK ")
    print_blue("- Installing promptflow Python SDK from local directory")
    package_location = f"{Path('./src/promptflow/').absolute()}"
    if extra_deps:
        print_blue(f"- Installing with extra dependencies: {extra_deps}")
        extra_deps = ",".join(extra_deps)
        package_location = f"{package_location}[{extra_deps}]"
    cmds = ["pip", "install", "-e", package_location]
    print_blue(f"Running {cmds}")
    run_command(commands=cmds, **command_args)
    run_command(
        commands=["pip", "install", "-r", str(Path("./src/promptflow/dev_requirements.txt").absolute())],
        **command_args,
    )


if __name__ == "__main__":
    epilog = """
Sample Usages:
    python scripts/building/dev_setup.py
    python scripts/building/dev_setup.py --promptflow-extra-deps azure
"""
    parser = argparse.ArgumentParser(
        description="Welcome to promptflow dev setup!",
        epilog=epilog,
    )
    parser.add_argument(
        "--promptflow-extra-deps", required=False, nargs="+", type=str, help="extra dependencies for promptflow"
    )
    parser.add_argument("-v", "--verbose", action="store_true", required=False, help="turn on verbose output")
    args = parser.parse_args()

    command_args = {"shell": system() == "Windows", "stream_stdout": args.verbose}
    setup_promptflow(extra_deps=args.promptflow_extra_deps, command_args=command_args)
    run_command(commands=["pre-commit", "install"], **command_args)
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/building/utils.py
import logging
import os
import subprocess
import sys
import time
import traceback

module_logger = logging.getLogger(__name__)


class Color:
    PURPLE = "\033[95m"
    CYAN = "\033[96m"
    DARKCYAN = "\033[36m"
    BLUE = "\033[94m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
    END = "\033[0m"


def print_red(message):
    print(Color.RED + message + Color.END)


def print_blue(message):
    print(Color.BLUE + message + Color.END)


def get_test_files(testpath):
    if os.path.isfile(testpath):
        return [testpath]
    else:
        res = []
        for root, dirs, files in os.walk(testpath):
            module_logger.debug("Searching %s for files ending in 'tests.py'", root)
            res.extend([os.path.join(root, file) for file in files if file.endswith("tests.py")])
        return res


def retry(fn, num_attempts=3):
    if num_attempts <= 0:
        raise Exception("Illegal num_attempts: {}".format(num_attempts))
    count = 0
    for _ in range(0, num_attempts):
        try:
            return fn()
        except Exception:
            count += 1
            print("Execution failed on attempt {} out of {}".format(count, num_attempts))
            print("Exception trace:")
            traceback.print_exc()
            if count == num_attempts:
                print("Execution failed after {} attempts".format(count))
                raise


def _run_command(
    commands,
    cwd=None,
    stderr=subprocess.STDOUT,
    shell=False,
    env=None,
    stream_stdout=True,
    throw_on_retcode=True,
    logger=None,
):
    if logger is None:
        logger = module_logger
    if cwd is None:
        cwd = os.getcwd()
    t0 = time.perf_counter()
    try:
        logger.debug("[RunCommand]Executing {0} in {1}".format(commands, cwd))
        out = ""
        p = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=stderr, cwd=cwd, shell=shell, env=env)
        for line in p.stdout:
            line = line.decode("utf-8").rstrip()
            if line and line.strip():
                logger.debug(line)
                if stream_stdout:
                    sys.stdout.write(line)
                    sys.stdout.write("\n")
                out += line
                out += "\n"
        p.communicate()
        retcode = p.poll()
        if throw_on_retcode:
            if retcode:
                raise subprocess.CalledProcessError(retcode, p.args, output=out, stderr=p.stderr)
        return retcode, out
    finally:
        t1 = time.perf_counter()
        logger.debug("[RunCommand] Execution took {0}s for {1} in {2}".format(t1 - t0, commands, cwd))


def run_command(
    commands, cwd=None, stderr=subprocess.STDOUT, shell=False, stream_stdout=True, throw_on_retcode=True, logger=None
):
    return _run_command(
        commands,
        cwd=cwd,
        stderr=stderr,
        shell=shell,
        stream_stdout=stream_stdout,
        throw_on_retcode=throw_on_retcode,
        logger=logger,
    )
0
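A short usage sketch for the `retry()` helper above; it assumes `scripts/building` is on `sys.path` so `utils` can be imported, and the flaky function is invented:

```python
import random

from utils import retry

def flaky_fetch() -> str:
    if random.random() < 0.7:  # simulate a transient failure
        raise ConnectionError("transient failure")
    return "ok"

# Re-runs flaky_fetch up to 3 times, printing the traceback on each failure,
# and re-raises after the final failed attempt.
print(retry(flaky_fetch, num_attempts=3))
```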
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/building/run_coverage_tests.py
import argparse
import os
import sys
from pathlib import Path

from utils import Color, run_command, print_red

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=Color.RED + "Test Coverage for Promptflow!" + Color.END + "\n")
    parser.add_argument("-p", required=True, nargs="+", help="The paths to calculate code coverage")
    parser.add_argument("-t", required=True, nargs="+", help="The path to the tests")
    parser.add_argument("-l", required=True, help="Location to run tests in")
    parser.add_argument(
        "-m",
        required=True,
        help="Pytest marker to identify the tests to run",
        default="all",
    )
    parser.add_argument(
        "-o",
        required=False,
        help="Pytest output file name",
        default="test-results.xml",
    )
    parser.add_argument("-n", help="Pytest number of process to run the tests", default="auto")
    parser.add_argument(
        "--model-name",
        help="The model file name to run the tests",
        type=str,
        default="",
    )
    parser.add_argument("--timeout", help="Timeout for individual tests (seconds)", type=str, default="")
    parser.add_argument(
        "--coverage-config",
        help="The path of code coverage config file",
        type=str,
        default="",
    )
    parser.add_argument(
        "--disable-cov-branch",
        action="store_true",
        help="Whether to enable branch coverage calculation",
    )
    parser.add_argument(
        "--ignore-glob",
        help="The path of ignored test file",
        type=str,
        default="",
    )
    args = parser.parse_args()

    print("Working directory: " + str(os.getcwd()))
    print("Args.p: " + str(args.p))
    print("Args.t: " + str(args.t))
    print("Args.l: " + str(args.l))
    print("Args.m: " + str(args.m))
    print("Args.n: " + str(args.n))
    print("Args.o: " + str(args.o))
    print("Args.model-name: " + str(args.model_name))
    print("Args.timeout: " + str(args.timeout))
    print("Args.coverage-config: " + str(args.coverage_config))
    print("Args.ignore-glob: " + str(args.ignore_glob))
    print("Args.disable-cov-branch: " + str(args.disable_cov_branch))

    test_paths_list = [str(Path(path).absolute()) for path in args.t]

    # display a list of all Python packages installed in the current Python environment
    run_command(["pip", "list"])
    run_command(["pip", "show", "promptflow", "promptflow-sdk"])

    pytest_command = ["pytest", f"--junitxml={args.o}"]
    pytest_command += test_paths_list

    if args.coverage_config:
        if args.p:
            cov_path_list = [f"--cov={path}" for path in args.p]
            pytest_command += cov_path_list
        if not args.disable_cov_branch:
            pytest_command += ["--cov-branch"]
        pytest_command += [  # noqa: W503
            "--cov-report=term",
            "--cov-report=html",
            "--cov-report=xml",
        ]
        pytest_command = pytest_command + [f"--cov-config={args.coverage_config}"]
    if args.ignore_glob:
        pytest_command = pytest_command + [f"--ignore-glob={args.ignore_glob}"]
    pytest_command += [
        "-n",
        args.n,
        "--dist",
        "loadfile",
        "--log-level=info",
        "--log-format=%(asctime)s %(levelname)s %(message)s",
        "--log-date-format=[%Y-%m-%d %H:%M:%S]",
        "--durations=5",
        "-ra",
        "-vv",
    ]
    if args.timeout:
        pytest_command = pytest_command + [
            "--timeout",
            args.timeout,
            "--timeout_method",
            "thread",
        ]
    if args.m != "all":
        pytest_command = pytest_command + ["-m", args.m]
    if args.model_name:
        pytest_command = pytest_command + ["--model-name", args.model_name]

    # pytest --junit-xml=test-results.xml --cov=azure.ai.ml --cov-report=html --cov-report=xml -ra ./tests/*/unittests/
    error_code, _ = run_command(pytest_command, throw_on_retcode=False)
    # https://docs.pytest.org/en/7.1.x/reference/exit-codes.html
    if error_code == 1:
        print_red("Tests were collected and run but some of the tests failed.")
    elif error_code == 2:
        print_red("Test execution was interrupted by the user.")
    elif error_code == 3:
        print_red("Internal error happened while executing tests.")
    elif error_code == 4:
        print_red("pytest command line usage error.")
    elif error_code == 5:
        print_red("No tests were collected.")
    sys.exit(error_code)
0
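For context, a hypothetical invocation of the runner above; every path, marker, and config name here is illustrative rather than taken from the repository's CI:

```python
import subprocess
import sys

cmd = [
    sys.executable, "scripts/building/run_coverage_tests.py",
    "-p", "src/promptflow/promptflow",          # coverage targets (become --cov=...)
    "-t", "src/promptflow/tests/sdk_cli_test",  # test paths
    "-l", "src/promptflow",                     # required; echoed for logging
    "-m", "unittest",                           # pytest marker; "all" disables -m
    "--coverage-config", ".coveragerc",
]
result = subprocess.run(cmd)
print("pytest exit code:", result.returncode)  # the script propagates pytest's code
```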
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/building/generate_connection_config.py
import argparse
import json
from pathlib import Path

from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.keyvault.secrets import SecretClient

CONNECTION_FILE_NAME = "connections.json"
CONNECTION_TPL_FILE_PATH = Path(".") / "src/promptflow" / "dev-connections.json.example"


def get_secret_client(tenant_id: str, client_id: str, client_secret: str) -> SecretClient:
    try:
        if (tenant_id is None) or (client_id is None) or (client_secret is None):
            credential = DefaultAzureCredential()
            client = SecretClient(
                vault_url="https://promptflowprod.vault.azure.net/",
                credential=credential,
            )
        else:
            credential = ClientSecretCredential(tenant_id, client_id, client_secret)
            client = SecretClient(
                vault_url="https://github-promptflow.vault.azure.net/",
                credential=credential,
            )
    except Exception as e:
        # Re-raise instead of falling through: returning here would reference
        # an unbound `client` and fail with a misleading NameError.
        print(e)
        raise
    return client


def get_secret(secret_name: str, client: SecretClient):
    secret = client.get_secret(secret_name)
    return secret.value


def list_secret_names(client: SecretClient) -> list:
    secret_properties = client.list_properties_of_secrets()
    return [secret.name for secret in secret_properties]


def fill_key_to_dict(template_dict, keys_dict):
    if not isinstance(template_dict, dict):
        return
    for key, val in template_dict.items():
        if isinstance(val, str) and val in keys_dict:
            template_dict[key] = keys_dict[val]
            continue
        fill_key_to_dict(val, keys_dict)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tenant_id", type=str, help="The tenant id of the service principal")
    parser.add_argument("--client_id", type=str, help="The client id of the service principal")
    parser.add_argument("--client_secret", type=str, help="The client secret of the service principal")
    parser.add_argument("--target_folder", type=str, help="The target folder to save the generated file")
    args = parser.parse_args()

    with open(CONNECTION_TPL_FILE_PATH.resolve().absolute(), "r") as f:
        template_dict = json.load(f)
    file_path = (Path(".") / args.target_folder / CONNECTION_FILE_NAME).resolve().absolute().as_posix()
    print(f"file_path: {file_path}")

    client = get_secret_client(
        tenant_id=args.tenant_id,
        client_id=args.client_id,
        client_secret=args.client_secret,
    )
    all_secret_names = list_secret_names(client)
    data = {secret_name: get_secret(secret_name, client) for secret_name in all_secret_names}

    fill_key_to_dict(template_dict, data)

    with open(file_path, "w") as f:
        json.dump(template_dict, f)
0
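A toy illustration of `fill_key_to_dict()` above: template string values that match a Key Vault secret name are replaced in place, everything else is left untouched. It assumes the script's directory is importable; the secret names and template shape are invented:

```python
from generate_connection_config import fill_key_to_dict

template = {
    "azure_open_ai_connection": {
        "value": {"api_key": "aoai-api-key", "api_base": "aoai-api-endpoint"}
    }
}
secrets = {
    "aoai-api-key": "sk-dummy",
    "aoai-api-endpoint": "https://example.openai.azure.com/",
}

fill_key_to_dict(template, secrets)
print(template["azure_open_ai_connection"]["value"]["api_key"])  # -> sk-dummy
```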
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/curl_install_pypi/install.py
#!/usr/bin/env python # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # # This script will install the promptflow into a directory and create an executable # at a specified file path that is the entry point into the promptflow. # # The latest versions of all promptflow command packages will be installed. # import os import sys import platform import stat import tempfile import shutil import subprocess import hashlib PF_DISPATCH_TEMPLATE = """#!/usr/bin/env bash export PF_INSTALLER=Script {install_dir}/bin/python -m promptflow._cli._pf.entry "$@" """ PFAZURE_DISPATCH_TEMPLATE = """#!/usr/bin/env bash {install_dir}/bin/python -m promptflow._cli._pf_azure.entry "$@" """ PFS_DISPATCH_TEMPLATE = """#!/usr/bin/env bash {install_dir}/bin/python -m promptflow._sdk._service.entry "$@" """ DEFAULT_INSTALL_DIR = os.path.expanduser(os.path.join('~', 'lib', 'promptflow')) DEFAULT_EXEC_DIR = os.path.expanduser(os.path.join('~', 'bin')) PF_EXECUTABLE_NAME = 'pf' PFAZURE_EXECUTABLE_NAME = 'pfazure' PFS_EXECUTABLE_NAME = 'pfs' USER_BASH_RC = os.path.expanduser(os.path.join('~', '.bashrc')) USER_BASH_PROFILE = os.path.expanduser(os.path.join('~', '.bash_profile')) class CLIInstallError(Exception): pass def print_status(msg=''): print('-- '+msg) def prompt_input(msg): return input('\n===> '+msg) def prompt_input_with_default(msg, default): if default: return prompt_input("{} (leave blank to use '{}'): ".format(msg, default)) or default else: return prompt_input('{}: '.format(msg)) def prompt_y_n(msg, default=None): if default not in [None, 'y', 'n']: raise ValueError("Valid values for default are 'y', 'n' or None") y = 'Y' if default == 'y' else 'y' n = 'N' if default == 'n' else 'n' while True: ans = prompt_input('{} ({}/{}): '.format(msg, y, n)) if ans.lower() == n.lower(): return False if ans.lower() == y.lower(): return True if default and not ans: return default == y.lower() def exec_command(command_list, cwd=None, env=None): print_status('Executing: '+str(command_list)) subprocess.check_call(command_list, cwd=cwd, env=env) def create_tmp_dir(): tmp_dir = tempfile.mkdtemp() return tmp_dir def create_dir(dir): if not os.path.isdir(dir): print_status("Creating directory '{}'.".format(dir)) os.makedirs(dir) def is_valid_sha256sum(a_file, expected_sum): sha256 = hashlib.sha256() with open(a_file, 'rb') as f: sha256.update(f.read()) computed_hash = sha256.hexdigest() return expected_sum == computed_hash def create_virtualenv(install_dir): cmd = [sys.executable, '-m', 'venv', install_dir] exec_command(cmd) def install_cli(install_dir, tmp_dir): path_to_pip = os.path.join(install_dir, 'bin', 'pip') cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'promptflow[azure,executable,pfs,azureml-serving]', '--upgrade'] exec_command(cmd) cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'promptflow-tools', '--upgrade'] exec_command(cmd) cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'keyrings.alt', '--upgrade'] exec_command(cmd) def create_executable(exec_dir, install_dir): create_dir(exec_dir) exec_filepaths = [] for filename, template in [(PF_EXECUTABLE_NAME, PF_DISPATCH_TEMPLATE), (PFAZURE_EXECUTABLE_NAME, PFAZURE_DISPATCH_TEMPLATE), (PFS_EXECUTABLE_NAME, PFS_DISPATCH_TEMPLATE)]: exec_filepath = 
os.path.join(exec_dir, filename) with open(exec_filepath, 'w') as exec_file: exec_file.write(template.format(install_dir=install_dir)) cur_stat = os.stat(exec_filepath) os.chmod(exec_filepath, cur_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) print_status("The executable is available at '{}'.".format(exec_filepath)) exec_filepaths.append(exec_filepath) return exec_filepaths def get_install_dir(): install_dir = None while not install_dir: prompt_message = 'In what directory would you like to place the install?' install_dir = prompt_input_with_default(prompt_message, DEFAULT_INSTALL_DIR) install_dir = os.path.realpath(os.path.expanduser(install_dir)) if ' ' in install_dir: print_status("The install directory '{}' cannot contain spaces.".format(install_dir)) install_dir = None else: create_dir(install_dir) if os.listdir(install_dir): print_status("'{}' is not empty and may contain a previous installation.".format(install_dir)) ans_yes = prompt_y_n('Remove this directory?', 'n') if ans_yes: shutil.rmtree(install_dir) print_status("Deleted '{}'.".format(install_dir)) create_dir(install_dir) else: # User opted to not delete the directory so ask for install directory again install_dir = None print_status("We will install at '{}'.".format(install_dir)) return install_dir def get_exec_dir(): exec_dir = None while not exec_dir: prompt_message = (f"In what directory would you like to place the " f"'{PFS_EXECUTABLE_NAME}/{PFS_EXECUTABLE_NAME}/{PFAZURE_EXECUTABLE_NAME}' executable?") exec_dir = prompt_input_with_default(prompt_message, DEFAULT_EXEC_DIR) exec_dir = os.path.realpath(os.path.expanduser(exec_dir)) if ' ' in exec_dir: print_status("The executable directory '{}' cannot contain spaces.".format(exec_dir)) exec_dir = None create_dir(exec_dir) print_status("The executable will be in '{}'.".format(exec_dir)) return exec_dir def _backup_rc(rc_file): try: shutil.copyfile(rc_file, rc_file+'.backup') print_status("Backed up '{}' to '{}'".format(rc_file, rc_file+'.backup')) except (OSError, IOError): pass def _get_default_rc_file(): bashrc_exists = os.path.isfile(USER_BASH_RC) bash_profile_exists = os.path.isfile(USER_BASH_PROFILE) if not bashrc_exists and bash_profile_exists: return USER_BASH_PROFILE if bashrc_exists and bash_profile_exists and platform.system().lower() == 'darwin': return USER_BASH_PROFILE return USER_BASH_RC if bashrc_exists else None def _default_rc_file_creation_step(): rcfile = USER_BASH_PROFILE if platform.system().lower() == 'darwin' else USER_BASH_RC ans_yes = prompt_y_n('Could not automatically find a suitable file to use. 
Create {} now?'.format(rcfile), default='y') if ans_yes: open(rcfile, 'a').close() return rcfile return None def _find_line_in_file(file_path, search_pattern): try: with open(file_path, 'r', encoding="utf-8") as search_file: for line in search_file: if search_pattern in line: return True except (OSError, IOError): pass return False def _modify_rc(rc_file_path, line_to_add): if not _find_line_in_file(rc_file_path, line_to_add): with open(rc_file_path, 'a', encoding="utf-8") as rc_file: rc_file.write('\n'+line_to_add+'\n') def get_rc_file_path(): rc_file = None default_rc_file = _get_default_rc_file() if not default_rc_file: rc_file = _default_rc_file_creation_step() rc_file = rc_file or prompt_input_with_default('Enter a path to an rc file to update', default_rc_file) if rc_file: rc_file_path = os.path.realpath(os.path.expanduser(rc_file)) if os.path.isfile(rc_file_path): return rc_file_path print_status("The file '{}' could not be found.".format(rc_file_path)) return None def warn_other_azs_on_path(exec_dir, exec_filepath): env_path = os.environ.get('PATH') conflicting_paths = [] if env_path: for p in env_path.split(':'): for file in [PF_EXECUTABLE_NAME, PFAZURE_EXECUTABLE_NAME, PFS_EXECUTABLE_NAME]: p_to_pf = os.path.join(p, file) if p != exec_dir and os.path.isfile(p_to_pf): conflicting_paths.append(p_to_pf) if conflicting_paths: print_status() print_status(f"** WARNING: Other '{PFS_EXECUTABLE_NAME}/{PFS_EXECUTABLE_NAME}/{PFAZURE_EXECUTABLE_NAME}' " f"executables are on your $PATH. **") print_status("Conflicting paths: {}".format(', '.join(conflicting_paths))) print_status("You can run this installation of the promptflow with '{}'.".format(exec_filepath)) def handle_path_and_tab_completion(exec_filepath, exec_dir): ans_yes = prompt_y_n('Modify profile to update your $PATH now?', 'y') if ans_yes: rc_file_path = get_rc_file_path() if not rc_file_path: raise CLIInstallError('No suitable profile file found.') _backup_rc(rc_file_path) line_to_add = "export PATH=$PATH:{}".format(exec_dir) _modify_rc(rc_file_path, line_to_add) warn_other_azs_on_path(exec_dir, exec_filepath) print_status() print_status('** Run `exec -l $SHELL` to restart your shell. **') print_status() else: print_status("You can run the promptflow with '{}'.".format(exec_filepath)) def verify_python_version(): print_status('Verifying Python version.') v = sys.version_info if v < (3, 8): raise CLIInstallError('The promptflow does not support Python versions less than 3.8.') if 'conda' in sys.version: raise CLIInstallError("This script does not support the Python Anaconda environment. " "Create an Anaconda virtual environment and install with 'pip'") print_status('Python version {}.{}.{} okay.'.format(v.major, v.minor, v.micro)) def _native_dependencies_for_dist(verify_cmd_args, install_cmd_args, dep_list): try: print_status("Executing: '{} {}'".format(' '.join(verify_cmd_args), ' '.join(dep_list))) subprocess.check_output(verify_cmd_args + dep_list, stderr=subprocess.STDOUT) print_status('Native dependencies okay.') except subprocess.CalledProcessError: err_msg = 'One or more of the following native dependencies are not currently installed and may be required.\n' err_msg += '"{}"'.format(' '.join(install_cmd_args + dep_list)) print_status(err_msg) ans_yes = prompt_y_n('Missing native dependencies. 
Attempt to continue anyway?', 'n') if not ans_yes: raise CLIInstallError('Please install the native dependencies and try again.') def _get_linux_distro(): if platform.system() != 'Linux': return None, None try: with open('/etc/os-release') as lines: tokens = [line.strip() for line in lines] except Exception: return None, None release_info = {} for token in tokens: if '=' in token: k, v = token.split('=', 1) release_info[k.lower()] = v.strip('"') return release_info.get('name', None), release_info.get('version_id', None) def verify_install_dir_exec_path_conflict(install_dir, exec_dir): for exec_name in [PF_EXECUTABLE_NAME, PFAZURE_EXECUTABLE_NAME, PFS_EXECUTABLE_NAME]: exec_path = os.path.join(exec_dir, exec_name) if install_dir == exec_path: raise CLIInstallError("The executable file '{}' would clash with the install directory of '{}'. Choose " "either a different install directory or directory to place the " "executable.".format(exec_path, install_dir)) def main(): verify_python_version() tmp_dir = create_tmp_dir() install_dir = get_install_dir() exec_dir = get_exec_dir() verify_install_dir_exec_path_conflict(install_dir, exec_dir) create_virtualenv(install_dir) install_cli(install_dir, tmp_dir) exec_filepath = create_executable(exec_dir, install_dir) try: handle_path_and_tab_completion(exec_filepath, exec_dir) except Exception as e: print_status("Unable to set up PATH. ERROR: {}".format(str(e))) shutil.rmtree(tmp_dir) print_status("Installation successful.") print_status("Run the CLI with {} --help".format(exec_filepath)) if __name__ == '__main__': try: main() except CLIInstallError as cie: print('ERROR: '+str(cie), file=sys.stderr) sys.exit(1) except KeyboardInterrupt: print('\n\nExiting...') sys.exit(1) # SIG # Begin signature block # Z1F07ShfIJ7kejST2NXwW1QcFPEya4xaO2xZz6vLT847zaMzbc/PaEa1RKFlD881 # 4J+i6Au2wtbHzOXDisyH6WeLQ3gh0X2gxFRa4EzW7Nzjcvwm4+WogiTcnPVVxlk3 # qafM/oyVqs3695K7W5XttOiq2guv/yedsf/TW2BKSEKruFQh9IwDfIiBoi9Zv3wa # iuzQulRR8KyrCtjEPDV0t4WnZVB/edQea6xJZeTlMG+uLR/miBTbPhUb/VZkVjBf # qHBv623oLXICzoTNuaPTln9OWvL2NZpisGYvNzebKO7/Ho6AOWZNs5XOVnjs0Ax2 # aeXvlwBzIQyfyxd25487/Q== # SIG # End signature block
0
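`install.py` above ships an `is_valid_sha256sum()` helper that reads the whole file into memory before hashing. For context, a streaming variant of the same check; the file name and expected-checksum comparison are placeholders, not part of the installer:

```python
import hashlib

def sha256_of(path: str) -> str:
    """Compute a file's SHA-256 without loading it into memory at once."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

# Compare against a published checksum before trusting a downloaded artifact.
print(sha256_of("promptflow.msi"))
```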
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/curl_install_pypi/install
#!/usr/bin/env bash
#---------------------------------------------------------------------------------------------
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#
# Bash script to install the prompt flow
#
INSTALL_SCRIPT_URL="https://promptflowartifact.blob.core.windows.net/linux-install-scripts/install.py"
_TTY=/dev/tty

install_script=$(mktemp -t promptflow_install_tmp_XXXXXX) || exit
echo "Downloading prompt flow install script from $INSTALL_SCRIPT_URL to $install_script."
curl -# $INSTALL_SCRIPT_URL > $install_script || exit

python_cmd=python3
if ! command -v python3 >/dev/null 2>&1
then
  echo "ERROR: python3 not found."
  echo "If python3 is available on the system, add it to PATH."
  exit 1
fi

chmod 775 $install_script
echo "Running install script."
$python_cmd $install_script < $_TTY
0
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/curl_install_pypi/README.md
# Curl Install Script Information

The scripts in this directory are used for installing through curl and they point to the packages on PyPI.

## Install or update promptflow

```bash
curl https://promptflowartifact.blob.core.windows.net/linux-install-scripts/install | bash
```

The script can also be downloaded and run locally. You may have to restart your shell in order for the changes to take effect.

## Uninstall promptflow

Uninstall the promptflow by directly deleting the files from the location chosen at the time of installation.

1. Remove the installed CLI files.

   ```bash
   # The default install/executable location is the user's home directory ($HOME).
   rm -r $HOME/lib/promptflow
   rm $HOME/bin/pf
   rm $HOME/bin/pfs
   rm $HOME/bin/pfazure
   ```

2. Modify your `$HOME/.bash_profile` or `$HOME/.bashrc` file to remove the following line:

   ```text
   export PATH=$PATH:$HOME/bin
   ```

3. If using `bash` or `zsh`, reload your shell's command cache.

   ```bash
   hash -r
   ```
0
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/windows/product.wxs
<?xml version="1.0" encoding="UTF-8"?> <Wix xmlns="http://schemas.microsoft.com/wix/2006/wi"> <?define ProductVersion="$(env.CLI_VERSION)" ?> <?define ProductName = "promptflow" ?> <?define ProductDescription = "Command-line tools for prompt flow." ?> <?define ProductAuthor = "Microsoft Corporation" ?> <?define ProductResources = ".\resources\" ?> <?define UpgradeCode32 = "8b748161-e07a-48f2-8cdf-401480df4694" ?> <?if $(var.Platform) = "x64" ?> <?define PromptflowCliRegistryGuid = "0efd984f-9eec-425b-b230-a3994b69649a" ?> <?define PromptflowServiceGuid = "d4e99207-77be-4bdf-a430-b08632c5aa2b" ?> <?define PromptflowSystemPathGuid = "4c321045-d4e0-4446-bda4-8c19eaa42af1" ?> <?define ProgramFilesFolder = "ProgramFiles64Folder" ?> <?define RemovePromptflowFolderGuid = "ee843aa5-2b72-4958-be84-53dbac17efc7" ?> <?define UpgradeCode = "772aa21f-f8d4-4771-b910-1dbce3f1920c" ?> <?define Architecture = "64-bit" ?> <?elseif $(var.Platform) = "x86" ?> <?define PromptflowCliRegistryGuid = "7c2c792d-c395-44a1-8222-8e4ea006abb9" ?> <?define PromptflowServiceGuid = "f706b208-a15d-4ae7-9185-cfcc43656570" ?> <?define PromptflowSystemPathGuid = "9661fe6a-ff48-4e7c-a60d-fc34c2d06ef3" ?> <?define ProgramFilesFolder = "ProgramFilesFolder" ?> <?define RemovePromptflowFolderGuid = "588ca5e1-38c6-4659-8b38-762df7ed5b28" ?> <?define UpgradeCode = $(var.UpgradeCode32) ?> <?define Architecture = "32-bit" ?> <?else ?> <?error Unsupported platform "$(var.Platform)" ?> <?endif ?> <Product Id="*" Name="$(var.ProductName) ($(var.Architecture))" Language="1033" Version="$(var.ProductVersion)" Manufacturer="$(var.ProductAuthor)" UpgradeCode="$(var.UpgradeCode)"> <Package InstallerVersion="200" Compressed="yes" InstallScope="perUser" /> <Upgrade Id="$(var.UpgradeCode)"> <UpgradeVersion Property="WIX_UPGRADE_DETECTED" Maximum="$(var.ProductVersion)" IncludeMaximum="no" MigrateFeatures="yes" /> <UpgradeVersion Property="WIX_DOWNGRADE_DETECTED" Minimum="$(var.ProductVersion)" IncludeMinimum="no" OnlyDetect="yes" /> </Upgrade> <InstallExecuteSequence> <RemoveExistingProducts After="InstallExecute" /> </InstallExecuteSequence> <!-- New product architectures should upgrade the original x86 product - even of the same version. 
--> <?if $(var.UpgradeCode) != $(var.UpgradeCode32) ?> <Upgrade Id="$(var.UpgradeCode32)"> <UpgradeVersion Property="WIX_X86_UPGRADE_DETECTED" Maximum="$(var.ProductVersion)" IncludeMaximum="yes" MigrateFeatures="yes" /> <UpgradeVersion Property="WIX_X86_DOWNGRADE_DETECTED" Minimum="$(var.ProductVersion)" IncludeMinimum="no" OnlyDetect="yes" /> </Upgrade> <Condition Message="A newer version of $(var.ProductName) is already installed.">NOT (WIX_DOWNGRADE_DETECTED OR WIX_X86_DOWNGRADE_DETECTED)</Condition> <?else ?> <Condition Message="A newer version of $(var.ProductName) is already installed.">NOT WIX_DOWNGRADE_DETECTED</Condition> <?endif ?> <Media Id="1" Cabinet="promptflow.cab" EmbedCab="yes" CompressionLevel="high" /> <Icon Id="PromptflowIcon" SourceFile="$(var.ProductResources)logo32.ico" /> <Property Id="ARPPRODUCTICON" Value="PromptflowIcon" /> <Property Id="ARPHELPLINK" Value="https://microsoft.github.io/promptflow/how-to-guides/quick-start.html" /> <Property Id="ARPURLINFOABOUT" Value="https://microsoft.github.io/promptflow/how-to-guides/quick-start.html" /> <Property Id="ARPURLUPDATEINFO" Value="https://microsoft.github.io/promptflow/how-to-guides/quick-start.html" /> <Property Id="MSIFASTINSTALL" Value="7" /> <Property Id="ApplicationFolderName" Value="promptflow" /> <Property Id="WixAppFolder" Value="WixPerUserFolder" /> <Feature Id="ProductFeature" Title="promptflow" Level="1" AllowAdvertise="no"> <ComponentGroupRef Id="ProductComponents" /> </Feature> <!--Custom action to propagate path env variable change--> <CustomActionRef Id="WixBroadcastEnvironmentChange" /> <!-- User Interface --> <WixVariable Id="WixUILicenseRtf" Value="$(var.ProductResources)CLI_LICENSE.rtf"/> <UIRef Id="WixUI_ErrorProgressText"/> <!-- Show message to restart any terminals only if the PATH is changed --> <CustomAction Id="Set_WIXUI_EXITDIALOGOPTIONALTEXT" Property="WIXUI_EXITDIALOGOPTIONALTEXT" Value="Please close and reopen any active terminal window to use prompt flow." 
/> <InstallUISequence> <Custom Action="Set_WIXUI_EXITDIALOGOPTIONALTEXT" After="CostFinalize">NOT Installed AND NOT WIX_UPGRADE_DETECTED</Custom> </InstallUISequence> <CustomAction Id="StartPromptFlowService" Directory="APPLICATIONFOLDER" Execute="deferred" ExeCommand="wscript.exe promptflow_service.vbs" Return="asyncNoWait" /> <InstallExecuteSequence> <Custom Action="StartPromptFlowService" Before="InstallFinalize">NOT Installed OR WIX_UPGRADE_DETECTED</Custom> </InstallExecuteSequence> </Product> <Fragment> <Directory Id="TARGETDIR" Name="SourceDir"> <Directory Id="$(var.ProgramFilesFolder)"> <Directory Id="APPLICATIONFOLDER" Name="promptflow" /> </Directory> <Directory Id="StartupFolder" /> </Directory> <UIRef Id="WixUI_Advanced" /> </Fragment> <Fragment> <ComponentGroup Id="PromptflowCliSettingsGroup"> <Component Id="RemovePromptflowFolder" Directory="APPLICATIONFOLDER" Guid="$(var.RemovePromptflowFolderGuid)"> <RemoveFolder Id="APPLICATIONFOLDER" On="uninstall" /> </Component> <Component Id="PromptflowSystemPath" Directory="APPLICATIONFOLDER" Guid="$(var.PromptflowSystemPathGuid)"> <Environment Id="PromptflowAddedToPATH" Name="PATH" Value="[APPLICATIONFOLDER]" Permanent="no" Part="first" Action="set" System="no" /> <CreateFolder /> </Component> <Component Id="promptflow_service.vbs" Directory="APPLICATIONFOLDER" Guid="$(var.PromptflowServiceGuid)"> <File Id="promptflow_service.vbs" Source="scripts\promptflow_service.vbs" KeyPath="yes" Checksum="yes"/> </Component> <Component Id="ApplicationShortcut" Directory="StartupFolder" Guid="$(var.PromptflowCliRegistryGuid)"> <Shortcut Id="ApplicationStartMenuShortcut" Name="Prompt flow service" Description="Prompt Flow Service" Target="[#promptflow_service.vbs]" WorkingDirectory="APPLICATIONFOLDER" Advertise="no"> <Icon Id="PromptflowServiceIcon" SourceFile="$(var.ProductResources)logo32.ico" /> </Shortcut> <RemoveFile Id="CleanUpShortCut" Directory="StartupFolder" Name="Prompt flow service" On="uninstall"/> <RegistryKey Root="HKCU" Key="Software\Microsoft\$(var.ProductName)" Action="createAndRemoveOnUninstall"> <RegistryValue Name="installed" Type="integer" Value="1" /> <RegistryValue Name="version" Type="string" Value="$(var.ProductVersion)" KeyPath="yes"/> </RegistryKey> </Component> </ComponentGroup> <ComponentGroup Id="ProductComponents"> <ComponentGroupRef Id="PromptflowCliComponentGroup"/> <ComponentGroupRef Id="PromptflowCliSettingsGroup"/> </ComponentGroup> </Fragment> </Wix>
0
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/windows/install_from_msi.md
# Install prompt flow MSI installer on Windows Prompt flow is a suite of development tools designed to streamline the end-to-end development cycle of LLM-based AI applications, and it can be installed locally on Windows computers. For Windows, the prompt flow is installed via an MSI, which gives you access to the CLI through the Windows Command Prompt (CMD) or PowerShell. ## Install or update The MSI distributable is used for installing or updating the prompt flow on Windows. You don't need to uninstall current versions before using the MSI installer because the MSI updates any existing version. ::::{tab-set} :::{tab-item} Microsoft Installer (MSI) :sync: Microsoft Installer (MSI) ### Latest version Download and install the latest release of the prompt flow. When the installer asks if it can make changes to your computer, select the "Yes" box. > [Latest release of the promptflow (64-bit)](https://aka.ms/installpromptflowwindowsx64) ### Specific version If you prefer, you can download a specific version of the promptflow by using a URL. To download the MSI installer for a specific version, change the version segment in the URL `https://promptflowartifact.blob.core.windows.net/msi-installer/promptflow-<version>.msi`. ::: :::{tab-item} Microsoft Installer (MSI) with PowerShell :sync: Microsoft Installer (MSI) with PowerShell ### PowerShell To install the prompt flow using PowerShell, start PowerShell and run the following command: ```PowerShell $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://aka.ms/installpromptflowwindowsx64 -OutFile .\promptflow.msi; Start-Process msiexec.exe -Wait -ArgumentList '/I promptflow.msi /quiet'; Remove-Item .\promptflow.msi ``` This command downloads and installs the latest 64-bit release of the prompt flow for Windows. To install a specific version, replace the `-Uri` argument with the URL of that version, as shown below. Here is an example of using the 64-bit installer of promptflow version 1.0.0 in PowerShell: ```PowerShell $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://promptflowartifact.blob.core.windows.net/msi-installer/promptflow-1.0.0.msi -OutFile .\promptflow.msi; Start-Process msiexec.exe -Wait -ArgumentList '/I promptflow.msi /quiet'; Remove-Item .\promptflow.msi ``` ::: :::: ## Run the prompt flow You can now run the prompt flow with the `pf` or `pfazure` command from either Windows Command Prompt or PowerShell. ## Upgrade the prompt flow Beginning with version 1.4.0, the prompt flow provides an in-tool command to upgrade to the latest version. ```commandline pf upgrade ``` For prompt flow versions prior to 1.4.0, upgrade by reinstalling as described in the Install or update section above. ## Uninstall You can uninstall the prompt flow from the Windows "Apps and Features" list. To uninstall: | Platform | Instructions | |---|---| | Windows 11 | Start > Settings > Apps > Installed apps | | Windows 10 | Start > Settings > System > Apps & Features | | Windows 8 and Windows 7 | Start > Control Panel > Programs > Uninstall a program | Once on this screen, type __promptflow__ into the program search bar. The program to uninstall is listed as __promptflow (64-bit)__. Select this application, then select the `Uninstall` button. ## FAQ ### Where is the prompt flow installed? In Windows, the 64-bit prompt flow installs in `C:\Users\**\AppData\Local\Apps\promptflow` by default. ### What version of the prompt flow is installed? Type `pf --version` in a terminal window to see which version of the prompt flow is installed. 
Your output looks like this: ```output promptflow x.x.x Executable '***\python.exe' Python (Windows) 3.*.* | packaged by conda-forge | * ```
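### Can I uninstall from the command line? The "Apps and Features" flow above is the supported path, but like any MSI package the prompt flow can also be removed non-interactively with `msiexec`. A minimal sketch, assuming you still have the `promptflow.msi` file you originally installed from (run it from the folder containing that file):

```PowerShell
Start-Process msiexec.exe -Wait -ArgumentList '/x promptflow.msi /quiet'
```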
0
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/windows/README.md
# Building the Windows MSI Installer This document provides instructions on creating the MSI installer. ## Option 1: Building with GitHub Actions Trigger the [workflow](https://github.com/microsoft/promptflow/actions/workflows/build_msi_installer.yml) manually. ## Option 2: Local Building ### Prerequisites 1. Turn on the '.NET Framework 3.5' Windows Feature (required for WIX Toolset). 2. Install 'Microsoft Build Tools 2015' from https://www.microsoft.com/download/details.aspx?id=48159. 3. Make sure `curl.exe`, `unzip.exe` and `msbuild.exe` are available on your PATH. 4. Install the 'WIX Toolset build tools' following the instructions below. - Enter the directory where the README is located (`cd scripts/installer/windows`), `mkdir wix` and `cd wix`. - `curl --output wix-archive.zip https://azurecliprod.blob.core.windows.net/msi/wix310-binaries-mirror.zip` - `unzip wix-archive.zip` and `del wix-archive.zip` 5. We recommend creating a clean virtual Python environment and installing all dependencies using src/promptflow/setup.py. - `python -m venv venv` - `venv\Scripts\activate` - `pip install promptflow[azure,executable,pfs] promptflow-tools` ### Building 1. Update the version number `$(env.CLI_VERSION)` and `$(env.FILE_VERSION)` in `product.wxs`, `promptflow.wixproj` and `version_info.txt`. 2. `cd scripts/installer/windows/scripts` and run `pyinstaller promptflow.spec`. 3. `cd scripts/installer/windows` and run `msbuild /t:rebuild /p:Configuration=Release /p:Platform=x64 promptflow.wixproj`. 4. The unsigned MSI will be in the `scripts/installer/windows/out` folder. (A condensed command sketch follows the notes below.) ## Notes - If you encounter an "Access is denied" error when running promptflow, follow the [link](https://learn.microsoft.com/en-us/microsoft-365/security/defender-endpoint/attack-surface-reduction-rules-deployment-implement?view=o365-worldwide#customize-attack-surface-reduction-rules) to add the executable to the Windows Defender Attack Surface Reduction (ASR) rule.
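For quick reference, the local build condenses to the commands below; a minimal sketch assuming the prerequisites above are met (WIX tools unpacked under `scripts/installer/windows/wix`, the virtual environment active, and version numbers already updated), starting from the repository root:

```commandline
cd scripts\installer\windows\scripts
pyinstaller promptflow.spec
cd ..
msbuild /t:rebuild /p:Configuration=Release /p:Platform=x64 promptflow.wixproj
rem the unsigned MSI lands in scripts\installer\windows\out
```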
0
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/windows/promptflow.wixproj
<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <!-- Project --> <PropertyGroup> <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration> <Platform Condition=" '$(Platform)' == '' ">x86</Platform> <ProductVersion>3.10</ProductVersion> <ProjectGuid>04ff6707-750d-4474-89b3-7922c84721be</ProjectGuid> <SchemaVersion>2.0</SchemaVersion> <OutputName>promptflow-$(env.CLI_VERSION)</OutputName> <OutputType>Package</OutputType> <WixTargetsPath Condition=" '$(WixTargetsPath)' == '' AND '$(MSBuildExtensionsPath32)' != '' ">$(MSBuildExtensionsPath32)\Microsoft\WiX\v3.x\Wix.targets</WixTargetsPath> <WixTargetsPath Condition=" '$(WixTargetsPath)' == '' ">$(MSBuildExtensionsPath)\Microsoft\WiX\v3.x\Wix.targets</WixTargetsPath> </PropertyGroup> <!-- Local WiX --> <PropertyGroup> <LocalWixRoot>wix</LocalWixRoot> <WixToolPath>$(MSBuildThisFileDirectory)$(LocalWixRoot)</WixToolPath> <WixTargetsPath Condition="Exists('$(WixToolPath)\Wix.targets')">$(WixToolPath)\Wix.targets</WixTargetsPath> <WixTasksPath Condition="Exists('$(WixToolPath)\wixtasks.dll')">$(WixToolPath)\wixtasks.dll</WixTasksPath> <PromptflowSource>scripts\dist\promptflow</PromptflowSource> <LinkerAdditionalOptions>-fv</LinkerAdditionalOptions> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x86' "> <OutputPath>out\$(Configuration)\</OutputPath> <IntermediateOutputPath>out\obj\$(Configuration)\</IntermediateOutputPath> <DefineConstants>Debug;PromptflowSource=$(PromptflowSource)</DefineConstants> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x86' "> <OutputPath>out\</OutputPath> <IntermediateOutputPath>out\obj\$(Configuration)\</IntermediateOutputPath> <DefineConstants>PromptflowSource=$(PromptflowSource)</DefineConstants> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x64' "> <OutputPath>out\$(Configuration)\</OutputPath> <IntermediateOutputPath>out\obj\$(Configuration)\</IntermediateOutputPath> <DefineConstants>Debug;PromptflowSource=$(PromptflowSource)</DefineConstants> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x64' "> <OutputPath>out\</OutputPath> <IntermediateOutputPath>out\obj\$(Configuration)\</IntermediateOutputPath> <DefineConstants>PromptflowSource=$(PromptflowSource)</DefineConstants> </PropertyGroup> <ItemGroup> <Compile Include="out\promptflow.wxs"> <Link>promptflow.wxs</Link> </Compile> <Compile Include="product.wxs" /> </ItemGroup> <ItemGroup> <None Include=".\resources\logo_pf.png" /> </ItemGroup> <!-- UI --> <ItemGroup> <WixExtension Include="WixUIExtension"> <HintPath>$(WixExtDir)\WixUIExtension.dll</HintPath> <Name>WixUIExtension</Name> </WixExtension> <WixExtension Include="WixUtilExtension"> <HintPath>$(WixExtDir)\WixUtilExtension.dll</HintPath> <Name>WixUtilExtension</Name> </WixExtension> </ItemGroup> <Import Project="$(WixTargetsPath)" Condition=" '$(WixTargetsPath)' != '' " /> <Import Project="$(MSBuildExtensionsPath32)\Microsoft\WiX\v3.x\wix.targets" Condition=" '$(WixTargetsPath)' == '' AND Exists('$(MSBuildExtensionsPath32)\Microsoft\WiX\v3.x\wix.targets') " /> <Target Name="EnsureWixToolsetInstalled" Condition=" '$(WixTargetsImported)' != 'true' "> <Error Text="The WiX Toolset v3.10 build tools must be installed to build this project. 
To download the WiX Toolset, see https://wixtoolset.org/releases/v3.10/stable" /> </Target> <Target Name="BeforeBuild"> <HeatDirectory Directory="$(PromptflowSource)" ToolPath="$(WixToolPath)" AutogenerateGuids="true" ComponentGroupName="PromptflowCliComponentGroup" SuppressRootDirectory="true" DirectoryRefId="APPLICATIONFOLDER" OutputFile="out\promptflow.wxs" PreprocessorVariable="var.PromptflowSource" /> </Target> </Project>
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/promptflow.spec
# -*- mode: python ; coding: utf-8 -*- from PyInstaller.utils.hooks import collect_data_files from PyInstaller.utils.hooks import copy_metadata datas = [('../resources/CLI_LICENSE.rtf', '.'), ('../../../../src/promptflow/NOTICE.txt', '.'), ('../../../../src/promptflow/promptflow/_sdk/data/executable/', './promptflow/_sdk/data/executable/'), ('../../../../src/promptflow-tools/promptflow/tools/', './promptflow/tools/'), ('./pf.bat', '.'), ('./pfs.bat', '.'), ('./pfazure.bat', '.'), ('./pfsvc.bat', '.')] datas += collect_data_files('streamlit') datas += copy_metadata('streamlit') datas += collect_data_files('streamlit_quill') datas += collect_data_files('promptflow') hidden_imports = ['streamlit.runtime.scriptrunner.magic_funcs', 'win32timezone', 'promptflow'] block_cipher = None pfcli_a = Analysis( ['pfcli.py'], pathex=[], binaries=[], datas=datas, hiddenimports=hidden_imports, hookspath=[], hooksconfig={}, runtime_hooks=[], excludes=[], win_no_prefer_redirects=False, win_private_assemblies=False, cipher=block_cipher, noarchive=False, ) pfcli_pyz = PYZ(pfcli_a.pure, pfcli_a.zipped_data, cipher=block_cipher) pfcli_exe = EXE( pfcli_pyz, pfcli_a.scripts, [], exclude_binaries=True, name='pfcli', debug=False, bootloader_ignore_signals=False, strip=False, upx=True, console=True, disable_windowed_traceback=False, argv_emulation=False, target_arch=None, codesign_identity=None, entitlements_file=None, contents_directory='.', icon='../resources/logo32.ico', version="./version_info.txt", ) coll = COLLECT( pfcli_exe, pfcli_a.binaries, pfcli_a.zipfiles, pfcli_a.datas, strip=False, upx=True, upx_exclude=[], name='promptflow', )
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pfs.bat
@echo off setlocal set MAIN_EXE=%~dp0.\pfcli.exe "%MAIN_EXE%" pfs %*
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/promptflow_service.vbs
DIM objshell set objshell = wscript.createobject("wscript.shell") iReturn = objshell.run("pfs.bat start --force", 0, true) ' window style 0 hides the console; true waits for the command to return
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/version_info.txt
# UTF-8 # # For more details about fixed file info 'ffi' see: # http://msdn.microsoft.com/en-us/library/ms646997.aspx VSVersionInfo( ffi=FixedFileInfo( # filevers and prodvers should always be a tuple with four items: (1, 2, 3, 4) # Set unneeded items to zero. filevers=($(env.FILE_VERSION)), prodvers=(1, 0, 0, 0), # Contains a bitmask that specifies the valid bits of 'flags'. mask=0x3f, # Contains a bitmask that specifies the Boolean attributes of the file. flags=0x0, # The operating system for which this file was designed. # 0x4 - NT and there is no need to change it. OS=0x4, # The general type of file. # 0x1 - the file is an application. fileType=0x1, # The function of the file. # 0x0 - the function is not defined for this fileType. subtype=0x0, # Creation date and time stamp. date=(0, 0) ), kids=[ StringFileInfo( [ StringTable( '040904E4', [StringStruct('CompanyName', 'Microsoft Corporation'), StringStruct('FileDescription', 'Microsoft prompt flow'), StringStruct('FileVersion', '1.0.0.0'), StringStruct('InternalName', 'setup'), StringStruct('LegalCopyright', 'Copyright (c) Microsoft Corporation. All rights reserved.'), StringStruct('ProductName', 'Microsoft prompt flow'), StringStruct('ProductVersion', '$(env.CLI_VERSION)')]) ]), VarFileInfo([VarStruct('Translation', [1033, 1252])]) ] )
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pfcli.py
import sys import multiprocessing # use this file as the only entry point for the CLI to avoid packaging the same environment repeatedly if __name__ == "__main__": multiprocessing.freeze_support() command = sys.argv[1] if len(sys.argv) > 1 else None sys.argv = sys.argv[1:] if command == 'pf': from promptflow._cli._pf.entry import main as pf_main pf_main() elif command == 'pfazure': from promptflow._cli._pf_azure.entry import main as pfazure_main pfazure_main() elif command == 'pfs': from promptflow._sdk._service.entry import main as pfs_main pfs_main() elif command == 'pfsvc': from promptflow._sdk._service.pfsvc import init as pfsvc_init pfsvc_init() else: print(f"Invalid command {command!r}. Please use 'pf', 'pfazure', 'pfs' or 'pfsvc'.")
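The `pfs.bat`-style shims in this installer simply forward their arguments to this dispatcher, so the frozen executable can also be exercised directly. A hypothetical session using the subcommands handled above (actual output depends on the installed version):

```commandline
pfcli.exe pf --version
pfcli.exe pfs start --force
```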
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pfsvc.bat
@echo off setlocal set MAIN_EXE=%~dp0.\pfcli.exe "%MAIN_EXE%" pfsvc %*
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pf.bat
@echo off setlocal set PF_INSTALLER=MSI set MAIN_EXE=%~dp0.\pfcli.exe "%MAIN_EXE%" pf %*
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pfazure.bat
@echo off setlocal set MAIN_EXE=%~dp0.\pfcli.exe "%MAIN_EXE%" pfazure %*
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/resources/CLI_LICENSE.rtf
{\rtf1\ansi\ansicpg1252\cocoartf1504\cocoasubrtf820 {\fonttbl\f0\fnil\fcharset0 Tahoma;\f1\froman\fcharset0 TimesNewRomanPSMT;\f2\ftech\fcharset77 Symbol; } {\colortbl;\red255\green255\blue255;\red0\green0\blue255;} {\*\expandedcolortbl;;\csgenericrgb\c0\c0\c100000;} {\*\listtable{\list\listtemplateid1\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid1\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid1} {\list\listtemplateid2\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid101\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid2} {\list\listtemplateid3\listhybrid{\listlevel\levelnfc4\levelnfcn4\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{lower-alpha\}.}{\leveltext\leveltemplateid201\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid3} {\list\listtemplateid4\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid301\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid4} {\list\listtemplateid5\listhybrid{\listlevel\levelnfc23\levelnfcn23\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{disc\}}{\leveltext\leveltemplateid401\'01\uc0\u8226 ;}{\levelnumbers;}\fi-360\li720\lin720 }{\listname ;}\listid5} {\list\listtemplateid6\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid501\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid6} {\list\listtemplateid7\listhybrid{\listlevel\levelnfc4\levelnfcn4\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{lower-alpha\}.}{\leveltext\leveltemplateid601\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid7} {\list\listtemplateid8\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid701\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid8}} {\*\listoverridetable{\listoverride\listid1\listoverridecount0\ls1}{\listoverride\listid2\listoverridecount0\ls2}{\listoverride\listid3\listoverridecount0\ls3}{\listoverride\listid4\listoverridecount0\ls4}{\listoverride\listid5\listoverridecount0\ls5}{\listoverride\listid6\listoverridecount0\ls6}{\listoverride\listid7\listoverridecount0\ls7}{\listoverride\listid8\listoverridecount0\ls8}} \margl1440\margr1440\vieww10800\viewh8400\viewkind0 \deftab720 \pard\pardeftab720\ri0\sb120\sa120\partightenfactor0 \f0\b\fs20 \cf0 MICROSOFT SOFTWARE LICENSE TERMS\ Microsoft prompt flow \f1 \ \pard\pardeftab720\ri0\sb120\sa120\partightenfactor0 \f0\b0 \cf0 These license terms are an agreement between Microsoft Corporation (or based on where you live, one of its affiliates) and you. They apply to the software named above. The terms also apply to any Microsoft services or updates for the software, except to the extent those have different terms. \f1 \ \pard\pardeftab720\ri0\sb120\sa120\partightenfactor0 \f0\b \cf0 IF YOU COMPLY WITH THESE LICENSE TERMS, YOU HAVE THE RIGHTS BELOW. 
\f1 \ \pard\tx360\pardeftab720\li357\fi-357\ri0\sb120\sa120\partightenfactor0 \ls1\ilvl0 \f0 \cf0 1. INSTALLATION AND USE RIGHTS. \f1\b0 \ \pard\pardeftab720\li357\ri0\sb120\sa120\partightenfactor0 \f0 \cf0 You may install and use any number of copies of the software.\ \pard\tx450\pardeftab720\li447\fi-357\ri0\sb120\sa120\partightenfactor0 \ls2\ilvl0 \b \cf0 2. TERMS FOR SPECIFIC COMPONENTS \f1 .\ \pard\tx4950\pardeftab720\li720\fi-270\ri0\sb120\sa120\partightenfactor0 \ls3\ilvl0 \f0 \cf0 a. Third Party Components \f1 . \f0\b0 The software may include third party components with separate legal notices or governed by other agreements, as may be described in the ThirdPartyNotices file(s) accompanying the software. Even if such components are governed by other agreements, the disclaimers and the limitations on and exclusions of damages below also apply. \f1 \ \pard\tx450\pardeftab720\li450\fi-357\ri0\sb120\sa120\partightenfactor0 \ls4\ilvl0 \f0\b \cf0 3. DATA. \b0 The software may collect information about you and your use of the software, and send that to Microsoft. Microsoft may use this information to provide services and improve our products and services. You may opt-out of many of these scenarios, but not all, as described in the product documentation. There are also some features in the software that may enable you and Microsoft to collect data from users of your applications. If you use these features, you must comply with applicable law, including providing appropriate notices to users of your applications and you should provide a copy of Microsoft\'92s privacy statement to your users. The Microsoft privacy statement is located here {\field{\*\fldinst{HYPERLINK "https://go.microsoft.com/fwlink/?LinkID=824704"}}{\fldrslt \cf2 \ul \ulc2 https://go.microsoft.com/fwlink/?LinkID=824704}}. You can learn more about data collection and use in the help documentation and our privacy statement. Your use of the software operates as your consent to these practices.\ \pard\tx360\pardeftab720\li357\fi-357\ri0\sb120\sa120\partightenfactor0 \ls4\ilvl0 \b \cf0 4. SCOPE OF LICENSE. \b0 The software is licensed, not sold. This agreement only gives you some rights to use the software. Microsoft reserves all other rights. Unless applicable law gives you more rights despite this limitation, you may use the software only as expressly permitted in this agreement. In doing so, you must comply with any technical limitations in the software that only allow you to use it in certain ways. You may not\ \pard\tx720\pardeftab720\li720\fi-363\ri0\sb120\sa120\partightenfactor0 \ls5\ilvl0 \f2 \cf0 \'a5 \f0 work around any technical limitations in the software;\ \ls5\ilvl0 \f2 \'a5 \f0 reverse engineer, decompile or disassemble the software, or otherwise attempt to derive the source code for the software except, and only to the extent required by third party licensing terms governing the use of certain open source components that may be included in the software;\ \ls5\ilvl0 \f2 \'a5 \f0 remove, minimize, block or modify any notices of Microsoft or its suppliers in the software; \ \ls5\ilvl0 \f2 \'a5 \f0 use the software in any way that is against the law; or\ \ls5\ilvl0 \f2 \'a5 \f0 share, publish, rent or lease the software, or provide the software as a stand-alone hosted as solution for others to use, or transfer the software or this agreement to any third party.\ \pard\tx360\pardeftab720\li357\fi-267\ri0\sb120\sa120\partightenfactor0 \ls6\ilvl0 \b \cf0 5. EXPORT RESTRICTIONS. 
\b0 You must comply with all domestic and international export laws and regulations that apply to the software, which include restrictions on destinations, end users, and end use. For further information on export restrictions, visit {\field{\*\fldinst{HYPERLINK "http://www.microsoft.com/exporting"}}{\fldrslt \cf2 \ul \ulc2 www.microsoft.com/exporting}}. \f1 \cf2 \ul \ulc2 \ \pard\tx450\pardeftab720\li447\fi-357\ri0\sb120\sa120\partightenfactor0 \ls6\ilvl0 \f0\b \cf0 \ulnone 6. SUPPORT SERVICES. \b0 Because this software is \'93as is,\'94 we may not provide support services for it.\ \ls6\ilvl0 \b 7. ENTIRE AGREEMENT. \b0 This agreement, and the terms for supplements, updates, Internet-based services and support services that you use, are the entire agreement for the software and support services.\ \ls6\ilvl0 \b 8. APPLICABLE LAW. \b0 If you acquired the software in the United States, Washington law applies to interpretation of and claims for breach of this agreement, and the laws of the state where you live apply to all other claims. If you acquired the software in any other country, its laws apply. \f1\b \ \ls6\ilvl0 \f0 9. CONSUMER RIGHTS; REGIONAL VARIATIONS. \b0 This agreement describes certain legal rights. You may have other rights, including consumer rights, under the laws of your state or country. Separate and apart from your relationship with Microsoft, you may also have rights with respect to the party from which you acquired the software. This agreement does not change those other rights if the laws of your state or country do not permit it to do so. For example, if you acquired the software in one of the below regions, or mandatory country law applies, then the following provisions apply to you:\ \pard\pardeftab720\li720\fi-270\ri0\sb120\sa120\partightenfactor0 \ls7\ilvl0 \b \cf0 b. Australia. \b0 You have statutory guarantees under the Australian Consumer Law and nothing in this agreement is intended to affect those rights.\ \pard\pardeftab720\li717\fi-267\ri0\sb120\sa120\partightenfactor0 \ls7\ilvl0 \b \cf0 c. Canada. \b0 If you acquired this software in Canada, you may stop receiving updates by turning off the automatic update feature, disconnecting your device from the Internet (if and when you re-connect to the Internet, however, the software will resume checking for and installing updates), or uninstalling the software. The product documentation, if any, may also specify how to turn off updates for your specific device or software.\ \ls7\ilvl0 \b d. Germany and Austria \f1\b0 .\ \pard\pardeftab720\li717\ri0\sb120\sa120\partightenfactor0 \f0\b \cf0 (i) \f1\b0 \f0\b Warranty \b0 . The properly licensed software will perform substantially as described in any Microsoft materials that accompany the software. However, Microsoft gives no contractual guarantee in relation to the licensed software.\ \b (ii) \f1\b0 \f0\b Limitation of Liability \b0 . In case of intentional conduct, gross negligence, claims based on the Product Liability Act, as well as, in case of death or personal or physical injury, Microsoft is liable according to the statutory law.\ Subject to the foregoing clause (ii), Microsoft will only be liable for slight negligence if Microsoft is in breach of such material contractual obligations, the fulfillment of which facilitate the due performance of this agreement, the breach of which would endanger the purpose of this agreement and the compliance with which a party may constantly trust in (so-called "cardinal obligations"). 
In other cases of slight negligence, Microsoft will not be liable for slight negligence.\ \pard\tx450\pardeftab720\li447\fi-357\ri0\sb120\sa120\partightenfactor0 \ls8\ilvl0 \b \cf0 10. DISCLAIMER OF WARRANTY. THE SOFTWARE IS LICENSED \'93AS-IS.\'94 YOU BEAR THE RISK OF USING IT. MICROSOFT GIVES NO EXPRESS WARRANTIES, GUARANTEES OR CONDITIONS. TO THE EXTENT PERMITTED UNDER YOUR LOCAL LAWS, MICROSOFT EXCLUDES THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.\ 11. LIMITATION ON AND EXCLUSION OF DAMAGES. YOU CAN RECOVER FROM MICROSOFT AND ITS SUPPLIERS ONLY DIRECT DAMAGES UP TO U.S. $5.00. YOU CANNOT RECOVER ANY OTHER DAMAGES, INCLUDING CONSEQUENTIAL, LOST PROFITS, SPECIAL, INDIRECT OR INCIDENTAL DAMAGES.\ \pard\pardeftab720\li450\ri0\sb120\sa120\partightenfactor0 \b0 \cf0 This limitation applies to (a) anything related to the software, services, content (including code) on third party Internet sites, or third party applications; and (b) claims for breach of contract, breach of warranty, guarantee or condition, strict liability, negligence, or other tort to the extent permitted by applicable law.\ It also applies even if Microsoft knew or should have known about the possibility of the damages. The above limitation or exclusion may not apply to you because your country may not allow the exclusion or limitation of incidental, consequential or other damages.}
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/dev-connections.json.example
{ "azure_open_ai_connection": { "type": "AzureOpenAIConnection", "value": { "api_key": "aoai-api-key", "api_base": "aoai-api-endpoint", "api_type": "azure", "api_version": "2023-07-01-preview" }, "module": "promptflow.connections" }, "bing_config": { "type": "BingConnection", "value": { "api_key": "bing-api-key" }, "module": "promptflow.connections" }, "bing_connection": { "type": "BingConnection", "value": { "api_key": "bing-api-key" }, "module": "promptflow.connections" }, "azure_content_safety_config": { "type": "AzureContentSafetyConnection", "value": { "api_key": "content-safety-api-key", "endpoint": "https://content-safety-canary-test.cognitiveservices.azure.com", "api_version": "2023-04-30-preview" }, "module": "promptflow.connections" }, "serp_connection": { "type": "SerpConnection", "value": { "api_key": "serpapi-api-key" }, "module": "promptflow.connections" }, "translate_connection": { "type": "CustomConnection", "value": { "api_key": "<your-key>", "api_endpoint": "https://api.cognitive.microsofttranslator.com/", "api_region": "global" }, "module": "promptflow.connections", "module": "promptflow.connections", "secret_keys": [ "api_key" ] }, "custom_connection": { "type": "CustomConnection", "value": { "key1": "hey", "key2": "val2" }, "module": "promptflow.connections", "secret_keys": [ "key1" ] }, "custom_strong_type_connection": { "type": "CustomConnection", "value": { "api_key": "<your-key>", "api_base": "This is my first custom connection.", "promptflow.connection.custom_type": "MyFirstConnection", "promptflow.connection.module": "my_tool_package.connections" }, "module": "promptflow.connections", "secret_keys": [ "api_key" ] }, "open_ai_connection": { "type": "OpenAIConnection", "value": { "api_key": "<your-key>", "organization": "<your-organization>" }, "module": "promptflow.connections" } }
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/pyproject.toml
[tool.black] line-length = 120 [tool.pytest.ini_options] markers = [ "sdk_test", "cli_test", "unittest", "e2etest", "flaky", "endpointtest", "mt_endpointtest", ] [tool.coverage.run] omit = [ # omit anything in a _restclient directory anywhere "*/_restclient/*", ]
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/pf
#!/usr/bin/env python import sys import os if os.environ.get('PF_INSTALLER') is None: os.environ['PF_INSTALLER'] = 'PIP' os.execl(sys.executable, sys.executable, '-m', 'promptflow._cli._pf.entry', *sys.argv[1:])
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/dev_requirements.txt
azure-identity azure-keyvault beautifulsoup4==4.12.2 coverage keyrings.alt # this is possibly insecure, do not add this to setup.py mock nox portalocker~=1.2 pre-commit # promptflow-tools  # commented out since it would pull in unnecessary dependencies pydash PyJWT==2.8.0 # parse token to get tenant id during sanitization pytest pytest-asyncio pytest-cov pytest-forked pytest-mock pytest-nunit pytest-sugar pytest-timeout pytest-xdist setuptools setuptools_rust twine==4.0.0 vcrpy==5.1.0 # record and replay http requests for pfazure tests wheel httpx # test dummy flow run in notebook, give a minimal version for vulnerability issue ipykernel>=6.27.1 papermill>=2.5.0
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/MANIFEST.in
include promptflow/azure/resources/* include promptflow/_sdk/_serving/static/* recursive-include promptflow/_cli/data * recursive-include promptflow/_sdk/data *
0