repo_id
stringlengths 15
132
| file_path
stringlengths 34
176
| content
stringlengths 2
3.52M
| __index_level_0__
int64 0
0
|
---|---|---|---|
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_calorie_by_jogging.py | import random
import time
from promptflow import tool
@tool
def get_calorie_by_jogging(duration: float, temperature: float):
    """Estimate the calories burned by jogging based on duration and temperature.

    :param duration: the length of the jogging in hours.
    :type duration: float
    :param temperature: the environment temperature in degrees Celsius.
    :type temperature: float
    :return: a mocked calorie count between 50 and 100.
    """
    message = (
        f"Figure out the calories burned by jogging, with temperature of {temperature} degrees Celsius, "
        f"and duration of {duration} hours."
    )
    print(message)
    # Pause 0.2-1 second so traces show a non-trivial latency for this tool.
    time.sleep(random.uniform(0.2, 1))
    return random.randint(50, 100)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/add_message_and_run.py | import asyncio
import json
from openai import AsyncOpenAI
from openai.types.beta.threads import MessageContentImageFile, MessageContentText
from promptflow import tool, trace
from promptflow.connections import OpenAIConnection
from promptflow.contracts.multimedia import Image
from promptflow.contracts.types import AssistantDefinition
from promptflow.exceptions import SystemErrorException
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
URL_PREFIX = "https://platform.openai.com/files/"
RUN_STATUS_POLLING_INTERVAL_IN_MILSEC = 1000
@tool
async def add_message_and_run(
    conn: OpenAIConnection,
    assistant_id: str,
    thread_id: str,
    message: list,
    assistant_definition: AssistantDefinition,
    download_images: bool,
):
    """Post *message* on the thread, run the assistant over it, and return the reply.

    :return: dict with the promptflow-formatted content of the newest thread
        message plus references to any OpenAI files it mentions.
    """
    client = await get_openai_api_client(conn)
    tool_invoker = await get_assisant_tool_invoker(assistant_definition)
    # Tools are registered at run creation rather than assistant creation, so a
    # missing assistant id can simply be replaced with a freshly created one.
    if not assistant_id:
        assistant_id = (await create_assistant(client, assistant_definition)).id
    await add_message(client, message, thread_id)
    run = await start_run(client, assistant_id, thread_id, assistant_definition, tool_invoker)
    await wait_for_run_complete(client, thread_id, tool_invoker, run)
    messages = await get_message(client, thread_id)
    latest_content = messages.data[0].content
    file_id_references = await get_openai_file_references(latest_content, download_images, conn)
    return {"content": to_pf_content(latest_content), "file_id_references": file_id_references}
@trace
async def get_openai_api_client(conn: OpenAIConnection):
    """Build an AsyncOpenAI client from the promptflow connection credentials."""
    return AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
@trace
async def get_assisant_tool_invoker(assistant_definition: AssistantDefinition):
    """Create the tool invoker for the assistant's declared tools.

    NOTE(review): "assisant" is a typo, kept because callers use this public name.
    """
    return AssistantToolInvoker.init(assistant_definition.tools)
@trace
async def create_assistant(cli: AsyncOpenAI, assistant_definition: AssistantDefinition):
    """Create a new OpenAI assistant from the definition's model and instructions."""
    assistant = await cli.beta.assistants.create(
        model=assistant_definition.model, instructions=assistant_definition.instructions
    )
    print(f"Created assistant: {assistant.id}")
    return assistant
@trace
async def add_message(cli: AsyncOpenAI, message: list, thread_id: str):
    """Post a user message (text plus any referenced files) onto a thread.

    :param cli: async OpenAI client.
    :param message: flow message list of strings and typed dict parts.
    :param thread_id: id of the thread to append to.
    :return: the created thread message object.
    """
    # Text parts become the message body; file parts are uploaded first so the
    # message can reference them by file id.
    content = extract_text_from_message(message)
    file_ids = await extract_file_ids_from_message(cli, message)
    msg = await cli.beta.threads.messages.create(thread_id=thread_id, role="user", content=content, file_ids=file_ids)
    # Bug fix: the original print was missing the f-prefix (braces were printed
    # literally) and referenced `assistant_id`, which is not in scope here.
    print(f"Created message message_id: {msg.id}, thread_id: {thread_id}")
    return msg
@trace
async def start_run(
    cli: AsyncOpenAI,
    assistant_id: str,
    thread_id: str,
    assistant_definition: AssistantDefinition,
    invoker: AssistantToolInvoker,
):
    """Create a run on the thread, registering the invoker's tools with it."""
    run = await cli.beta.threads.runs.create(
        assistant_id=assistant_id,
        thread_id=thread_id,
        model=assistant_definition.model,
        instructions=assistant_definition.instructions,
        tools=invoker.to_openai_tools(),
    )
    print(f"Assistant_id: {assistant_id}, thread_id: {thread_id}, run_id: {run.id}")
    return run
async def wait_for_status_check():
    """Sleep one polling interval before the next run-status check."""
    interval_in_seconds = RUN_STATUS_POLLING_INTERVAL_IN_MILSEC / 1000.0
    await asyncio.sleep(interval_in_seconds)
async def get_run_status(cli: AsyncOpenAI, thread_id: str, run_id: str):
    """Retrieve the current state of a run and log its status."""
    retrieved = await cli.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
    print(f"Run status: {retrieved.status}")
    return retrieved
@trace
async def get_tool_calls_outputs(invoker: AssistantToolInvoker, run):
    """Invoke every tool call requested by *run* and collect their outputs.

    :return: list of {"tool_call_id", "output"} dicts ready for submission.
    """
    tool_outputs = []
    for tool_call in run.required_action.submit_tool_outputs.tool_calls:
        tool_args = json.loads(tool_call.function.arguments)
        print(f"Invoking tool: {tool_call.function.name} with args: {tool_args}")
        output = invoker.invoke_tool(tool_call.function.name, tool_args)
        # Outputs must be submitted as strings regardless of the tool's return type.
        entry = {"tool_call_id": tool_call.id, "output": str(output)}
        tool_outputs.append(entry)
        print(f"Tool output: {str(output)}")
    return tool_outputs
@trace
async def submit_tool_calls_outputs(cli: AsyncOpenAI, thread_id: str, run_id: str, tool_outputs: list):
    """Send the collected tool outputs back to the run that requested them."""
    await cli.beta.threads.runs.submit_tool_outputs(thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs)
    # Fixed typo in the log message ("resonses" -> "responses").
    print(f"Submitted all required responses for run: {run_id}")
@trace
async def require_actions(cli: AsyncOpenAI, thread_id: str, run, invoker: AssistantToolInvoker):
    """Resolve a run's required tool calls and submit their outputs."""
    outputs = await get_tool_calls_outputs(invoker, run)
    await submit_tool_calls_outputs(cli, thread_id, run.id, outputs)
@trace
async def wait_for_run_complete(cli: AsyncOpenAI, thread_id: str, invoker: AssistantToolInvoker, run):
    """Poll the run until it reaches "completed", servicing tool calls on the way.

    Raises for any other non-progress status (e.g. "failed", "cancelled").
    """
    while run.status != "completed":
        # Sleep first so the API is never hammered between status checks.
        await wait_for_status_check()
        run = await get_run_status(cli, thread_id, run.id)
        if run.status == "requires_action":
            await require_actions(cli, thread_id, run, invoker)
        elif run.status == "in_progress" or run.status == "completed":
            # Nothing to do; the while condition re-checks "completed".
            continue
        else:
            # NOTE(review): assumes run.last_error is populated for every status
            # reaching this branch — confirm; a transient status such as
            # "queued" would also land here and raise.
            raise Exception(f"The assistant tool runs in '{run.status}' status. Message: {run.last_error.message}")
@trace
async def get_run_steps(cli: AsyncOpenAI, thread_id: str, run_id: str):
    """Print the step details recorded for every step of the run."""
    run_steps = await cli.beta.threads.runs.steps.list(thread_id=thread_id, run_id=run_id)
    print("step details: \n")
    for step in run_steps.data:
        print(step.step_details)
@trace
async def get_message(cli: AsyncOpenAI, thread_id: str):
    """List all messages currently on the thread (newest first, per OpenAI API)."""
    return await cli.beta.threads.messages.list(thread_id=thread_id)
def extract_text_from_message(message: list):
    """Join the text parts of a flow message into one newline-separated string.

    Plain strings are taken as-is; dict parts contribute only when their type
    is "text" and a "text" key is present. Everything else is ignored.
    """
    parts = []
    for part in message:
        if isinstance(part, str):
            parts.append(part)
        elif part.get("type", "") == "text" and "text" in part:
            parts.append(part["text"])
    return "\n".join(parts)
async def extract_file_ids_from_message(cli: AsyncOpenAI, message: list):
    """Upload every file referenced in *message* and return the OpenAI file ids.

    Plain-string parts carry no files and are skipped; dict parts are used only
    when they are of type "file_path" and carry a non-empty path.
    """
    file_ids = []
    for m in message:
        if isinstance(m, str):
            continue
        message_type = m.get("type", "")
        if message_type == "file_path" and "file_path" in m:
            path = m["file_path"].get("path", "")
            if path:
                # Bug fix: use a context manager so the handle is closed even if
                # the upload raises (the original leaked the open file object).
                with open(path, "rb") as file_handle:
                    file = await cli.files.create(file=file_handle, purpose="assistants")
                file_ids.append(file.id)
    return file_ids
async def get_openai_file_references(content: list, download_image: bool, conn: OpenAIConnection):
    """Collect the OpenAI file ids referenced by assistant message content.

    :return: dict mapping file id -> {"url": ..., optionally "content": Image}.
        Image files are downloaded only when *download_image* is true; text
        annotations (file paths / citations) always contribute URL-only entries.
    :raises SystemErrorException: for content items of an unsupported type.
    """
    file_id_references = {}
    for item in content:
        if isinstance(item, MessageContentImageFile):
            file_id = item.image_file.file_id
            if download_image:
                file_id_references[file_id] = {
                    "content": await download_openai_image(file_id, conn),
                    "url": URL_PREFIX + file_id,
                }
            else:
                file_id_references[file_id] = {"url": URL_PREFIX + file_id}
        elif isinstance(item, MessageContentText):
            for annotation in item.text.annotations:
                if annotation.type == "file_path":
                    file_id = annotation.file_path.file_id
                    file_id_references[file_id] = {"url": URL_PREFIX + file_id}
                elif annotation.type == "file_citation":
                    file_id = annotation.file_citation.file_id
                    file_id_references[file_id] = {"url": URL_PREFIX + file_id}
        else:
            # Consistency fix: raise the same SystemErrorException that the
            # sibling to_pf_content uses, instead of a bare Exception, so
            # callers handle unsupported content uniformly.
            raise SystemErrorException(f"Unsupported content type: '{type(item)}'.")
    return file_id_references
def to_pf_content(content: list):
    """Convert OpenAI assistant message content into promptflow's dict format.

    Image files become {"type": "image_file", ...}; text items become
    {"type": "text", ...} with their annotations serialized alongside.

    :raises SystemErrorException: for content items of an unsupported type.
    """
    pf_content = []
    for item in content:
        if isinstance(item, MessageContentImageFile):
            file_id = item.image_file.file_id
            pf_content.append({"type": "image_file", "image_file": {"file_id": file_id}})
        elif isinstance(item, MessageContentText):
            text_dict = {"type": "text", "text": {"value": item.text.value, "annotations": []}}
            for annotation in item.text.annotations:
                annotation_dict = {
                    # Bug fix: the original hard-coded "file_path" here, which
                    # mislabeled file_citation annotations handled below.
                    "type": annotation.type,
                    "text": annotation.text,
                    "start_index": annotation.start_index,
                    "end_index": annotation.end_index,
                }
                if annotation.type == "file_path":
                    annotation_dict["file_path"] = {"file_id": annotation.file_path.file_id}
                elif annotation.type == "file_citation":
                    annotation_dict["file_citation"] = {"file_id": annotation.file_citation.file_id}
                text_dict["text"]["annotations"].append(annotation_dict)
            pf_content.append(text_dict)
        else:
            raise SystemErrorException(f"Unsupported content type: {type(item)}")
    return pf_content
async def download_openai_image(file_id: str, conn: OpenAIConnection):
    """Fetch an image file's bytes from OpenAI and wrap them in a promptflow Image."""
    client = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    payload = await client.files.content(file_id)
    return Image(payload.read())
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/flow.dag.yaml | environment:
python_requirements_txt: requirements.txt
version: 2
inputs:
chat_history:
type: list
is_chat_history: true
default: []
question:
type: string
is_chat_input: true
default: I am going to swim today for 30 min in Guangzhou city, how much
calories will I burn?
assistant_id:
type: string
default: ""
thread_id:
type: string
default: ""
outputs:
answer:
type: string
reference: ${assistant.output}
is_chat_output: true
thread_id:
type: string
reference: ${get_or_create_thread.output}
nodes:
- name: get_or_create_thread
type: python
source:
type: code
path: get_or_create_thread.py
inputs:
conn: chw-manager-OpenAI
thread_id: ${inputs.thread_id}
- name: assistant
type: python
source:
type: code
path: add_message_and_run.py
inputs:
conn: chw-manager-OpenAI
message: ${inputs.question}
assistant_id: ${inputs.assistant_id}
thread_id: ${get_or_create_thread.output}
download_images: true
assistant_definition: assistant_definition.yaml
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_flow/print_input.py | from promptflow import tool
@tool
def print_input(input: str) -> str:
    """Echo *input* unchanged (pass-through node for chaining/activation tests)."""
    return input
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_flow/flow.dag.yaml | inputs:
text:
type: string
default: world
outputs:
output1:
type: string
reference: ${nodeC.output}
output2:
type: string
reference: ${nodeD.output}
nodes:
- name: nodeA
type: python
source:
type: code
path: print_input.py
inputs:
input: ${inputs.text}
activate:
when: ${inputs.text}
is: hello
- name: nodeB
type: python
source:
type: code
path: print_input.py
inputs:
input: ${inputs.text}
activate:
when: ${nodeA.output}
is: hello
- name: nodeC
type: python
source:
type: code
path: print_input.py
inputs:
input: ${nodeB.output}
- name: nodeD
type: python
source:
type: code
path: print_input.py
inputs:
input: ${inputs.text}
activate:
when: ${inputs.text}
is: world
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool/inputs.jsonl | {"num": "hello"} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool/divide_num.py | from promptflow import tool
@tool
def divide_num(num: int) -> int:
    """Return *num* divided by 2, truncated toward zero.

    int() truncation (not floor division) is kept deliberately so negative
    inputs behave as before: divide_num(-3) == -1, whereas -3 // 2 == -2.
    """
    # Idiom fix: drop the C-style "(int)(...)" cast parentheses.
    return int(num / 2)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool/flow.dag.yaml | inputs:
num:
type: int
outputs:
content:
type: string
reference: ${divide_num.output}
nodes:
- name: divide_num
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${inputs.num}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/default_input/samples.json | [
{
"text": "text_1"
}
] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/default_input/test_print_aggregation.py | from typing import List
from promptflow import tool
@tool
def test_print_input(input_str: List[str], input_bool: List[bool], input_list: List[List], input_dict: List[dict]):
assert input_bool[0] == False
assert input_list[0] == []
assert input_dict[0] == {}
print(input_str)
return input_str | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/default_input/flow.dag.yaml |
inputs:
input_str:
type: string
default: input value from default
input_bool:
type: bool
default: False
input_list:
type: list
default: []
input_dict:
type: object
default: {}
outputs:
output:
type: string
reference: ${test_print_input.output}
nodes:
- name: test_print_input
type: python
source:
type: code
path: test_print_input.py
inputs:
input_str: ${inputs.input_str}
input_bool: ${inputs.input_bool}
input_list: ${inputs.input_list}
input_dict: ${inputs.input_dict}
- name: aggregate_node
type: python
source:
type: code
path: test_print_aggregation.py
inputs:
input_str: ${inputs.input_str}
input_bool: ${inputs.input_bool}
input_list: ${inputs.input_list}
input_dict: ${inputs.input_dict}
aggregation: true
use_variants: false
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/default_input/test_print_input.py | from promptflow import tool
@tool
def test_print_input(input_str: str, input_bool: bool, input_list: list, input_dict: dict):
    """Echo *input_str* after verifying the other inputs hold their flow
    defaults: input_bool falsy, input_list empty, input_dict empty."""
    assert not input_bool
    assert input_list == []
    assert input_dict == {}
    print(input_str)
    return input_str
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/fail.py | from aaa import bbb # noqa: F401 | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: string
reference: ${node1.output}
nodes:
- name: node1
type: python
source:
type: code
path: dummy_utils/main.py
inputs:
x: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/dummy_utils/util_tool.py | from promptflow import tool
@tool
def passthrough(x: str):
    """Return *x* unchanged."""
    return x
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/dummy_utils/main.meta.json | {
"name": "main",
"type": "python",
"inputs": {
"x": {
"type": [
"string"
]
}
},
"source": "dummy_utils/main.py",
"function": "main"
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/dummy_utils/main.py | from promptflow import tool
from dummy_utils.util_tool import passthrough
@tool
def main(x: str):
    """Forward *x* through the package-local passthrough tool."""
    return passthrough(x)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input_with_variant/flow.dag.yaml | inputs:
key:
type: object
outputs:
output:
type: string
reference: ${print_val.output.value}
nodes:
- name: print_val
use_variants: true
type: python
source:
type: code
path: print_val.py
node_variants:
print_val:
default_variant_id: variant1
variants:
variant1:
node:
type: python
source:
type: code
path: print_val.py
inputs:
key: ${inputs.key}
conn: mock_custom_connection | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input_with_variant/print_val.py | from promptflow import tool
from promptflow.connections import CustomConnection
@tool
def get_val(key, conn: CustomConnection):
    """Return a dict describing *key*; rejects anything that is not a dict.

    :param key: expected to be a dict (the flow's "object" input).
    :param conn: custom connection, accepted but not used in the body.
    :raises TypeError: when *key* is not a dict.
    """
    print(key)
    if isinstance(key, dict):
        return {"value": f"{key}: {type(key)}"}
    raise TypeError(f"key must be a dict, got {type(key)}")
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_stream_output/chat.jinja2 | system:
You are a helpful assistant.
{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}
user:
{{question}} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_stream_output/flow.dag.yaml | inputs:
chat_history:
type: list
is_chat_history: true
question:
type: string
is_chat_input: true
default: What is ChatGPT?
outputs:
answer:
type: string
reference: ${chat_node.output}
is_chat_output: true
nodes:
- inputs:
deployment_name: gpt-35-turbo
max_tokens: "256"
temperature: "0.7"
chat_history: ${inputs.chat_history}
question: ${inputs.question}
name: chat_node
type: llm
source:
type: code
path: chat.jinja2
api: chat
provider: AzureOpenAI
connection: azure_open_ai_connection | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/samples.json | [
{
"line_number": 0,
"variant_id": "variant_0",
"groundtruth": "App",
"prediction": "App"
}
]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/convert_to_dict.py | import json
from promptflow import tool
@tool
def convert_to_dict(input_str: str):
    """Parse *input_str* as JSON, falling back to a default category dict.

    Any parse failure is logged and mapped to
    {"category": "None", "evidence": "None"} so downstream nodes always get a dict.
    """
    fallback = {"category": "None", "evidence": "None"}
    try:
        parsed = json.loads(input_str)
    except Exception as e:
        print("input is not valid, error: {}".format(e))
        return fallback
    return parsed
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/fetch_text_content_from_url.py | import bs4
import requests
from promptflow import tool
@tool
def fetch_text_content_from_url(url: str):
    """Fetch *url* and return up to the first 2000 characters of its visible text.

    Any failure (non-200 status, network or parse error) is logged and mapped to
    the sentinel string "No available content" so the flow keeps going.
    """
    # Send a request to the URL
    try:
        # Desktop-browser User-Agent: some sites block the default python-requests agent.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            # NOTE(review): prettify() returns a string that is discarded here;
            # it does not modify the soup in place — confirm the call is needed.
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: {response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/classify_with_llm.jinja2 | Your task is to classify a given url into one of the following types:
Movie, App, Academic, Channel, Profile, PDF or None based on the text content information.
The classification will be based on the url, the webpage text content summary, or both.
Here are a few examples:
{% for ex in examples %}
URL: {{ex.url}}
Text content: {{ex.text_content}}
OUTPUT:
{"category": "{{ex.category}}", "evidence": "{{ex.evidence}}"}
{% endfor %}
For a given URL : {{url}}, and text content: {{text_content}}.
Classify above url to complete the category and indicate evidence.
OUTPUT:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/summarize_text_content__variant_1.jinja2 | Please summarize some keywords of this paragraph and have some details of each keywords.
Do not add any information that is not in the text.
Text: {{text}}
Summary:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/prepare_examples.py | from promptflow import tool
@tool
def prepare_examples():
    """Return the static few-shot examples used by the URL-classification prompt.

    Each example carries a url, its text_content summary, the expected category,
    and the evidence the classifier should cite ("URL", "Text content", "Both",
    or "None").
    """
    return [
        {
            "url": "https://play.google.com/store/apps/details?id=com.spotify.music",
            "text_content": "Spotify is a free music and podcast streaming app with millions of songs, albums, and original podcasts. It also offers audiobooks, so users can enjoy thousands of stories. It has a variety of features such as creating and sharing music playlists, discovering new music, and listening to popular and exclusive podcasts. It also has a Premium subscription option which allows users to download and listen offline, and access ad-free music. It is available on all devices and has a variety of genres and artists to choose from.",
            "category": "App",
            "evidence": "Both",
        },
        {
            "url": "https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw",
            "text_content": "NFL Sunday Ticket is a service offered by Google LLC that allows users to watch NFL games on YouTube. It is available in 2023 and is subject to the terms and privacy policy of Google LLC. It is also subject to YouTube's terms of use and any applicable laws.",
            "category": "Channel",
            "evidence": "URL",
        },
        {
            "url": "https://arxiv.org/abs/2303.04671",
            "text_content": "Visual ChatGPT is a system that enables users to interact with ChatGPT by sending and receiving not only languages but also images, providing complex visual questions or visual editing instructions, and providing feedback and asking for corrected results. It incorporates different Visual Foundation Models and is publicly available. Experiments show that Visual ChatGPT opens the door to investigating the visual roles of ChatGPT with the help of Visual Foundation Models.",
            "category": "Academic",
            "evidence": "Text content",
        },
        {
            "url": "https://ab.politiaromana.ro/",
            "text_content": "There is no content available for this text.",
            "category": "None",
            "evidence": "None",
        },
    ]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/flow.dag.yaml | inputs:
url:
type: string
default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
outputs:
category:
type: string
reference: ${convert_to_dict.output.category}
evidence:
type: string
reference: ${convert_to_dict.output.evidence}
nodes:
- name: convert_to_dict
type: python
source:
type: code
path: convert_to_dict.py
inputs:
input_str: ${classify_with_llm.output}
- name: summarize_text_content
type: llm
source:
type: code
path: summarize_text_content__variant_1.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '256'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
- name: classify_with_llm
type: llm
source:
type: code
path: classify_with_llm.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
url: ${inputs.url}
examples: ${prepare_examples.output}
text_content: ${summarize_text_content.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
- name: fetch_text_content_from_url
type: python
source:
type: code
path: fetch_text_content_from_url.py
inputs:
url: ${inputs.url}
- name: prepare_examples
type: python
source:
type: code
path: prepare_examples.py
inputs: {} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/summarize_text_content.jinja2 | Please summarize the following text in one paragraph. 100 words.
Do not add any information that is not in the text.
Text: {{text}}
Summary:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/external_files/convert_to_dict.py | import json
from promptflow import tool
@tool
def convert_to_dict(input_str: str):
    """Parse *input_str* as JSON, falling back to a default category dict.

    Any parse failure is logged and mapped to
    {"category": "None", "evidence": "None"} so downstream nodes always get a dict.
    """
    fallback = {"category": "None", "evidence": "None"}
    try:
        parsed = json.loads(input_str)
    except Exception as e:
        print("input is not valid, error: {}".format(e))
        return fallback
    return parsed
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/external_files/fetch_text_content_from_url.py | import bs4
import requests
from promptflow import tool
@tool
def fetch_text_content_from_url(url: str):
    """Fetch *url* and return up to the first 2000 characters of its visible text.

    Any failure (non-200 status, network or parse error) is logged and mapped to
    the sentinel string "No available content" so the flow keeps going.
    """
    # Send a request to the URL
    try:
        # Desktop-browser User-Agent: some sites block the default python-requests agent.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            # NOTE(review): prettify() returns a string that is discarded here;
            # it does not modify the soup in place — confirm the call is needed.
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: {response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/external_files/summarize_text_content.jinja2 | Please summarize the following text in one paragraph. 100 words.
Do not add any information that is not in the text.
Text: {{text}}
Summary:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/samples.json | {
"question": "What is the capital of the United States of America?",
"chat_history": []
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/inputs.jsonl | {"question": "What is the capital of the United States of America?", "chat_history": [], "stream": true}
{"question": "What is the capital of the United States of America?", "chat_history": [], "stream": false}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/chat.py | import openai
from openai.version import VERSION as OPENAI_VERSION
from typing import List
from promptflow import tool
from promptflow.connections import AzureOpenAIConnection
IS_LEGACY_OPENAI = OPENAI_VERSION.startswith("0.")
def get_client(connection: AzureOpenAIConnection):
    """Build an OpenAI or AzureOpenAI client from the connection.

    A key starting with "sk-" is treated as a native OpenAI key; anything else
    is assumed to be an Azure OpenAI key and gets endpoint/api-version wired in.
    """
    api_key = connection.api_key
    kwargs = dict(api_key=api_key)
    if api_key.startswith("sk-"):
        from openai import OpenAI as Client
    else:
        from openai import AzureOpenAI as Client
        kwargs.update(
            azure_endpoint=connection.api_base,
            api_version=connection.api_version,
        )
    return Client(**kwargs)
def create_messages(question, chat_history):
    """Yield chat-completion messages: system prompt, prior QA turns, then the new question."""
    yield {"role": "system", "content": "You are a helpful assistant."}
    for turn in chat_history:
        yield {"role": "user", "content": turn["inputs"]["question"]}
        yield {"role": "assistant", "content": turn["outputs"]["answer"]}
    yield {"role": "user", "content": question}
@tool
def chat(connection: AzureOpenAIConnection, question: str, chat_history: List, stream: bool) -> str:
    """Ask the chat model *question* with *chat_history* context; return the answer text.

    Supports both the legacy (openai<1.0) and current SDKs. When *stream* is
    true the streamed chunks are consumed and concatenated before returning.
    """
    if IS_LEGACY_OPENAI:
        completion = openai.ChatCompletion.create(
            engine="gpt-35-turbo",
            messages=list(create_messages(question, chat_history)),
            temperature=1.0,
            top_p=1.0,
            n=1,
            stream=stream,
            stop=None,
            max_tokens=16,
            **dict(connection),
        )
    else:
        completion = get_client(connection).chat.completions.create(
            model="gpt-35-turbo",
            messages=list(create_messages(question, chat_history)),
            temperature=1.0,
            top_p=1.0,
            n=1,
            stream=stream,
            stop=None,
            max_tokens=16
        )
    if stream:
        def generator():
            # Chunks may arrive with empty choices or a None delta content.
            for chunk in completion:
                if chunk.choices:
                    if IS_LEGACY_OPENAI:
                        yield getattr(chunk.choices[0]["delta"], "content", "")
                    else:
                        yield chunk.choices[0].delta.content or ""
        # We must return the generator object, not using yield directly here.
        # Otherwise, the function itself will become a generator, despite whether stream is True or False.
        # return generator()
        # NOTE(review): despite the comment above, the stream is joined into a
        # plain string here; the commented-out `return generator()` suggests the
        # streaming return path was disabled deliberately — confirm intent.
        return "".join(generator())
    else:
        # chat api may return message with no content.
        if IS_LEGACY_OPENAI:
            return getattr(completion.choices[0].message, "content", "")
        else:
            return completion.choices[0].message.content or ""
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/flow.dag.yaml | inputs:
question:
type: string
chat_history:
type: list
stream:
type: bool
outputs:
answer:
type: string
reference: ${chat.output}
nodes:
- name: chat
type: python
source:
type: code
path: chat.py
inputs:
question: ${inputs.question}
chat_history: ${inputs.chat_history}
connection: azure_open_ai_connection
stream: ${inputs.stream}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment/requirements | tensorflow | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment/flow.dag.yaml | inputs:
key:
type: string
outputs:
output:
type: string
reference: ${print_env.output.value}
nodes:
- name: print_env
type: python
source:
type: code
path: print_env.py
inputs:
key: ${inputs.key}
environment:
python_requirements_txt: requirements
image: python:3.8-slim
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment/print_env.py | import os
from promptflow import tool
@tool
def get_env_var(key: str):
    """Read an environment variable, echo it to stdout, and return it in a dict.

    :param key: name of the environment variable to look up.
    :return: dict with a single "value" entry (None when the variable is unset).
    """
    value = os.environ.get(key)
    print(value)
    return {"value": value}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment/.promptflow/flow.tools.json | {
"package": {},
"code": {
"print_env.py": {
"type": "python",
"inputs": {
"key": {
"type": [
"string"
]
}
},
"function": "get_env_var"
}
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_input_dir/details.jsonl | {"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/assistant_definition.yaml | model: gpt-4-1106-preview
instructions: You are a helpful assistant.
tools:
- type: code_interpreter
- type: function
source:
type: code
path: get_stock_eod_price.py
tool_type: python
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/README.md | # Stock EOD Price Analyzer
This sample demonstrates how the PromptFlow Assistant tool helps with time series data (stock EOD price) retrieval, plotting, and consolidation.
Tools used in this flow:
- `get_or_create_thread` tool, python tool, used to provide assistant thread information if absent
- `add_message_and_run` tool, assistant tool, provisioned with below inner functions:
- `get_stock_eod_price`: get the stock EOD price based on date and company name
## Prerequisites
Install promptflow sdk and other dependencies in this folder:
```bash
pip install -r requirements.txt
```
## What you will learn
In this flow, you will understand how assistant tools within PromptFlow are triggered by user prompts. The assistant tool decides which internal functions or tools to invoke based on the input provided. Your responsibility involves implementing each of these tools and registering them in the `assistant_definition`. Additionally, be aware that the tools may have dependencies on each other, affecting the order and manner of their invocation.
## Getting started
### 1. Create assistant connection (openai)
Go to "Prompt flow" "Connections" tab. Click on "Create" button, select one of LLM tool supported connection types and fill in the configurations.
Currently, only the "Open AI" connection type is supported for the assistant tool. Please refer to [OpenAI](https://platform.openai.com/) for more details.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key>
```
Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`.
```bash
# show registered connection
pf connection show --name open_ai_connection
```
### 2. Create or get assistant/thread
Navigate to the OpenAI Assistant page and create an assistant if you haven't already. Once created, click on the 'Test' button to enter the assistant's playground. Make sure to note down the assistant_id.
**[Optional]** Start a chat session to create thread automatically. Keep track of the thread_id.
### 3. run the flow
```bash
# run chat flow with default question in flow.dag.yaml
pf flow test --flow .
```
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/get_or_create_thread.py | from openai import AsyncOpenAI
from promptflow import tool
from promptflow.connections import OpenAIConnection
@tool
async def get_or_create_thread(conn: OpenAIConnection, thread_id: str):
    """Return the caller-supplied thread id, or create a new OpenAI thread.

    :param conn: OpenAI connection holding the API key and organization.
    :param thread_id: existing thread id; a falsy value means "create one".
    :return: a usable thread id.
    """
    if not thread_id:
        client = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
        new_thread = await client.beta.threads.create()
        thread_id = new_thread.id
    return thread_id
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/requirements.txt | promptflow
promptflow-tools | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/stock_price.csv | Date,A,B
2023-03-15,100.25,110.50
2023-03-16,102.75,114.35
2023-03-17,101.60,120.10
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/add_message_and_run.py | import asyncio
import json
from openai import AsyncOpenAI
from openai.types.beta.threads import MessageContentImageFile, MessageContentText
from promptflow import tool, trace
from promptflow.connections import OpenAIConnection
from promptflow.contracts.multimedia import Image
from promptflow.contracts.types import AssistantDefinition
from promptflow.exceptions import SystemErrorException
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
URL_PREFIX = "https://platform.openai.com/files/"
RUN_STATUS_POLLING_INTERVAL_IN_MILSEC = 1000
@tool
async def add_message_and_run(
    conn: OpenAIConnection,
    assistant_id: str,
    thread_id: str,
    message: list,
    assistant_definition: AssistantDefinition,
    download_images: bool,
):
    """Post a message to an assistant thread, run it to completion, and return the reply.

    :param conn: OpenAI connection providing credentials.
    :param assistant_id: id of an existing assistant; falsy means create a new one.
    :param thread_id: id of the thread the message is appended to.
    :param message: mixed list of plain strings and typed parts (text / file_path dicts).
    :param assistant_definition: model, instructions and tool declarations for the run.
    :param download_images: when True, image outputs are downloaded and embedded in
        the returned file references instead of being linked by URL only.
    :return: dict with "content" (last assistant message in promptflow format) and
        "file_id_references" (file id -> url and optional content).
    """
    cli = await get_openai_api_client(conn)
    invoker = await get_assisant_tool_invoker(assistant_definition)
    # Check if assistant id is valid. If not, create a new assistant.
    # Note: tool registration at run creation, rather than at assistant creation.
    if not assistant_id:
        assistant = await create_assistant(cli, assistant_definition)
        assistant_id = assistant.id
    await add_message(cli, message, thread_id)
    run = await start_run(cli, assistant_id, thread_id, assistant_definition, invoker)
    await wait_for_run_complete(cli, thread_id, invoker, run)
    # messages.data[0] is taken as the assistant's reply — presumably the list is
    # newest-first; confirm against the OpenAI messages.list default ordering.
    messages = await get_message(cli, thread_id)
    file_id_references = await get_openai_file_references(messages.data[0].content, download_images, conn)
    return {"content": to_pf_content(messages.data[0].content), "file_id_references": file_id_references}
@trace
async def get_openai_api_client(conn: OpenAIConnection):
    """Construct an AsyncOpenAI client from the connection's credentials."""
    return AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
@trace
async def get_assisant_tool_invoker(assistant_definition: AssistantDefinition):
    """Build an invoker over the tools declared in the assistant definition.

    NOTE(review): the historical 'assisant' misspelling is kept on purpose —
    the name is referenced by the caller in this module.
    """
    return AssistantToolInvoker.init(assistant_definition.tools)
@trace
async def create_assistant(cli: AsyncOpenAI, assistant_definition: AssistantDefinition):
    """Create a new OpenAI assistant from the definition's instructions and model."""
    new_assistant = await cli.beta.assistants.create(
        model=assistant_definition.model,
        instructions=assistant_definition.instructions,
    )
    print(f"Created assistant: {new_assistant.id}")
    return new_assistant
@trace
async def add_message(cli: AsyncOpenAI, message: list, thread_id: str):
    """Post the user's message (text plus any uploaded files) to the thread.

    :param cli: OpenAI client.
    :param message: mixed list of plain strings and typed message parts.
    :param thread_id: thread to append the message to.
    :return: the created thread message object.
    """
    content = extract_text_from_message(message)
    file_ids = await extract_file_ids_from_message(cli, message)
    msg = await cli.beta.threads.messages.create(thread_id=thread_id, role="user", content=content, file_ids=file_ids)
    # Fix: the original print lacked the f prefix (it emitted the literal "{msg.id}"
    # placeholders) and referenced `assistant_id`, which is not in scope here.
    print(f"Created message message_id: {msg.id}, thread_id: {thread_id}")
    return msg
@trace
async def start_run(
    cli: AsyncOpenAI,
    assistant_id: str,
    thread_id: str,
    assistant_definition: AssistantDefinition,
    invoker: AssistantToolInvoker,
):
    """Kick off a run of the assistant on the thread, registering its tools.

    Tool schemas come from the invoker, so tools are attached per run rather
    than being baked into the assistant itself.
    """
    openai_tools = invoker.to_openai_tools()
    new_run = await cli.beta.threads.runs.create(
        assistant_id=assistant_id,
        thread_id=thread_id,
        model=assistant_definition.model,
        instructions=assistant_definition.instructions,
        tools=openai_tools,
    )
    print(f"Assistant_id: {assistant_id}, thread_id: {thread_id}, run_id: {new_run.id}")
    return new_run
async def wait_for_status_check():
    """Sleep for one polling interval between run-status checks."""
    interval_in_seconds = RUN_STATUS_POLLING_INTERVAL_IN_MILSEC / 1000.0
    await asyncio.sleep(interval_in_seconds)
async def get_run_status(cli: AsyncOpenAI, thread_id: str, run_id: str):
    """Fetch the run from the API, log its current status, and return it."""
    refreshed = await cli.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
    print(f"Run status: {refreshed.status}")
    return refreshed
@trace
async def get_tool_calls_outputs(invoker: AssistantToolInvoker, run):
    """Execute every tool call requested by the run and collect their outputs.

    :param invoker: invoker holding the registered tool implementations.
    :param run: run object in 'requires_action' status.
    :return: list of {"tool_call_id": ..., "output": ...} dicts for submission.
    """
    outputs = []
    for call in run.required_action.submit_tool_outputs.tool_calls:
        name = call.function.name
        args = json.loads(call.function.arguments)
        print(f"Invoking tool: {name} with args: {args}")
        result = invoker.invoke_tool(name, args)
        outputs.append({"tool_call_id": call.id, "output": str(result)})
        print(f"Tool output: {str(result)}")
    return outputs
@trace
async def submit_tool_calls_outputs(cli: AsyncOpenAI, thread_id: str, run_id: str, tool_outputs: list):
    """Send the collected tool outputs back to the run that requested them."""
    await cli.beta.threads.runs.submit_tool_outputs(thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs)
    # Fix: corrected the "resonses" typo in the log message.
    print(f"Submitted all required responses for run: {run_id}")
@trace
async def require_actions(cli: AsyncOpenAI, thread_id: str, run, invoker: AssistantToolInvoker):
    """Service a run in 'requires_action': execute the requested tools and reply."""
    outputs = await get_tool_calls_outputs(invoker, run)
    await submit_tool_calls_outputs(cli, thread_id, run.id, outputs)
@trace
async def wait_for_run_complete(cli: AsyncOpenAI, thread_id: str, invoker: AssistantToolInvoker, run):
    """Poll the run until it reaches 'completed', servicing any required tool actions.

    :param run: run object returned by `start_run`; re-fetched on every poll.
    :raises Exception: when the run reports any status other than
        'requires_action', 'in_progress' or 'completed'.
    """
    while run.status != "completed":
        await wait_for_status_check()
        run = await get_run_status(cli, thread_id, run.id)
        if run.status == "requires_action":
            # The model asked for tool calls; run them and submit the outputs.
            await require_actions(cli, thread_id, run, invoker)
        elif run.status == "in_progress" or run.status == "completed":
            # Nothing to do; the while condition terminates on 'completed'.
            continue
        else:
            # NOTE(review): transient statuses such as 'queued' would also land here
            # and raise, and `run.last_error` may be None for them — confirm intended.
            raise Exception(f"The assistant tool runs in '{run.status}' status. Message: {run.last_error.message}")
@trace
async def get_run_steps(cli: AsyncOpenAI, thread_id: str, run_id: str):
    """Print the step details of every step recorded for the run (debug aid)."""
    steps = await cli.beta.threads.runs.steps.list(thread_id=thread_id, run_id=run_id)
    print("step details: \n")
    for step in steps.data:
        print(step.step_details)
@trace
async def get_message(cli: AsyncOpenAI, thread_id: str):
    """List all messages currently on the thread."""
    return await cli.beta.threads.messages.list(thread_id=thread_id)
def extract_text_from_message(message: list):
    """Collect the plain-text parts of a mixed message list into one string.

    Plain strings are taken as-is; dict parts contribute only when they are
    typed "text" and carry a "text" key. Everything else is ignored.

    :param message: mixed list of strings and typed message-part dicts.
    :return: the text parts joined with newlines.
    """
    texts = []
    for part in message:
        if isinstance(part, str):
            texts.append(part)
        elif part.get("type", "") == "text" and "text" in part:
            texts.append(part["text"])
    return "\n".join(texts)
async def extract_file_ids_from_message(cli: AsyncOpenAI, message: list):
    """Upload every file_path part of the message and return the OpenAI file ids.

    :param cli: OpenAI client used for the uploads.
    :param message: mixed list of strings and typed message-part dicts.
    :return: list of file ids for the uploaded files (empty when none).
    """
    file_ids = []
    for m in message:
        if isinstance(m, str):
            continue
        message_type = m.get("type", "")
        if message_type == "file_path" and "file_path" in m:
            path = m["file_path"].get("path", "")
            if path:
                # Fix: close the file handle deterministically instead of leaking
                # the object returned by a bare open().
                with open(path, "rb") as file_handle:
                    file = await cli.files.create(file=file_handle, purpose="assistants")
                file_ids.append(file.id)
    return file_ids
async def get_openai_file_references(content: list, download_image: bool, conn: OpenAIConnection):
    """Map every file id referenced by the message content to its OpenAI URL.

    :param content: message content parts (image-file parts and annotated text).
    :param download_image: when True, image bytes are fetched and included
        alongside the URL under the "content" key.
    :param conn: connection used for downloading image content.
    :return: dict of file_id -> {"url": ..., optional "content": Image}.
    :raises Exception: on a content part that is neither image file nor text.
    """
    file_id_references = {}
    for item in content:
        if isinstance(item, MessageContentImageFile):
            file_id = item.image_file.file_id
            if download_image:
                file_id_references[file_id] = {
                    "content": await download_openai_image(file_id, conn),
                    "url": URL_PREFIX + file_id,
                }
            else:
                file_id_references[file_id] = {"url": URL_PREFIX + file_id}
        elif isinstance(item, MessageContentText):
            # Text parts reference files only through their annotations.
            for annotation in item.text.annotations:
                if annotation.type == "file_path":
                    file_id = annotation.file_path.file_id
                    file_id_references[file_id] = {"url": URL_PREFIX + file_id}
                elif annotation.type == "file_citation":
                    file_id = annotation.file_citation.file_id
                    file_id_references[file_id] = {"url": URL_PREFIX + file_id}
        else:
            raise Exception(f"Unsupported content type: '{type(item)}'.")
    return file_id_references
def to_pf_content(content: list):
    """Convert OpenAI message content parts into promptflow-friendly dicts.

    :param content: list of MessageContentImageFile / MessageContentText parts.
    :return: list of {"type": "image_file", ...} / {"type": "text", ...} dicts.
    :raises SystemErrorException: on any other content part type.
    """
    pf_content = []
    for item in content:
        if isinstance(item, MessageContentImageFile):
            file_id = item.image_file.file_id
            pf_content.append({"type": "image_file", "image_file": {"file_id": file_id}})
        elif isinstance(item, MessageContentText):
            text_dict = {"type": "text", "text": {"value": item.text.value, "annotations": []}}
            for annotation in item.text.annotations:
                annotation_dict = {
                    # Fix: the type was hard-coded to "file_path", which mislabeled
                    # file_citation annotations; use the annotation's own type.
                    "type": annotation.type,
                    "text": annotation.text,
                    "start_index": annotation.start_index,
                    "end_index": annotation.end_index,
                }
                if annotation.type == "file_path":
                    annotation_dict["file_path"] = {"file_id": annotation.file_path.file_id}
                elif annotation.type == "file_citation":
                    annotation_dict["file_citation"] = {"file_id": annotation.file_citation.file_id}
                text_dict["text"]["annotations"].append(annotation_dict)
            pf_content.append(text_dict)
        else:
            raise SystemErrorException(f"Unsupported content type: {type(item)}")
    return pf_content
async def download_openai_image(file_id: str, conn: OpenAIConnection):
    """Fetch an image file's bytes from OpenAI and wrap them as a promptflow Image."""
    client = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    response = await client.files.content(file_id)
    return Image(response.read())
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/flow.dag.yaml | version: 2
inputs:
assistant_input:
type: list
default:
- type: text
text: The provided file contains end-of-day (EOD) stock prices for companies A
and B across various dates in March. However, it does not include the
EOD stock prices for Company C.
- type: file_path
file_path:
path: ./stock_price.csv
- type: text
text: Please draw a line chart with the stock price of the company A, B and C
        and return a CSV file with the data.
assistant_id:
type: string
default: asst_eHO2rwEYqGH3pzzHHov2kBCG
thread_id:
type: string
default: ""
outputs:
assistant_output:
type: string
reference: ${add_message_and_run.output}
thread_id:
type: string
reference: ${get_or_create_thread.output}
nodes:
- name: get_or_create_thread
type: python
source:
type: code
path: get_or_create_thread.py
inputs:
conn: chw_openai
thread_id: ${inputs.thread_id}
- name: add_message_and_run
type: python
source:
type: code
path: add_message_and_run.py
inputs:
conn: chw_openai
message: ${inputs.assistant_input}
assistant_id: ${inputs.assistant_id}
thread_id: ${get_or_create_thread.output}
assistant_definition: assistant_definition.yaml
download_images: true
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/get_stock_eod_price.py | import random
import time
from promptflow import tool
@tool
def get_stock_eod_price(date: str, company: str):
    """Produce a simulated end-of-day stock price for the given company and date.

    :param date: the date of the stock price. e.g. 2021-01-01
    :type date: str
    :param company: the company name like A, B, C
    :type company: str
    """
    print(f"Try to get the stock end of day price by date {date} and company {company}.")
    # Random short delay (0.2s-1s) so the call shows up distinctly in traces.
    delay_seconds = random.uniform(0.2, 1)
    time.sleep(delay_seconds)
    # Fixture data: a random price in a plausible band, not a real quote.
    simulated_price = random.uniform(110, 130)
    return simulated_price
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_defined_chat_history/show_answer.py | from promptflow import tool
@tool
def show_answer(chat_answer: str):
    """Log the chat answer to stdout and return it unchanged."""
    answer = chat_answer
    print("print:", answer)
    return answer
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_defined_chat_history/chat.jinja2 | system:
You are a helpful assistant.
{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}
user:
{{question}} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_defined_chat_history/flow.dag.yaml | inputs:
user_chat_history:
type: list
is_chat_history: true
question:
type: string
is_chat_input: true
default: What is ChatGPT?
outputs:
answer:
type: string
reference: ${show_answer.output}
is_chat_output: true
nodes:
- inputs:
deployment_name: gpt-35-turbo
max_tokens: "256"
temperature: "0.7"
chat_history: ${inputs.user_chat_history}
question: ${inputs.question}
name: chat_node
type: llm
source:
type: code
path: chat.jinja2
api: chat
provider: AzureOpenAI
connection: azure_open_ai_connection
- name: show_answer
type: python
source:
type: code
path: show_answer.py
inputs:
chat_answer: ${chat_node.output}
node_variants: {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/llm_connection_override/connection_arm_template.json | {
"id": "/subscriptions/xxxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/connections/azure_open_ai_connection",
"name": "azure_open_ai_connection",
"type": "Microsoft.MachineLearningServices/workspaces/connections",
"properties": {
"authType": "ApiKey",
"credentials": {
"key": "api_key"
},
"category": "AzureOpenAI",
"expiryTime": null,
"target": "api_base",
"createdByWorkspaceArmId": null,
"isSharedToAll": false,
"sharedUserList": [],
"metadata": {
"azureml.flow.connection_type": "AzureOpenAI",
"azureml.flow.module": "promptflow.connections",
"ApiType": "azure",
"ApiVersion": "2023-03-15-preview"
}
},
"systemData": {
"createdAt": "2023-06-14T09:40:51.1117116Z",
"createdBy": "[email protected]",
"createdByType": "User",
"lastModifiedAt": "2023-06-14T09:40:51.1117116Z",
"lastModifiedBy": "[email protected]",
"lastModifiedByType": "User"
}
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/llm_connection_override/conn_tool.py | from promptflow import tool
from promptflow.connections import AzureOpenAIConnection
@tool
def conn_tool(conn: AzureOpenAIConnection):
    """Verify a real AzureOpenAIConnection was injected and return its api_base."""
    assert isinstance(conn, AzureOpenAIConnection)
    base_url = conn.api_base
    return base_url
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/llm_connection_override/flow.dag.yaml | inputs: {}
outputs:
output:
type: string
reference: ${conn_node.output}
nodes:
- name: conn_node
type: python
source:
type: code
path: conn_tool.py
inputs:
conn: aoai connection
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_multi_output_invalid/chat.jinja2 | system:
You are a helpful assistant.
{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}
user:
{{question}} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_multi_output_invalid/flow.dag.yaml | inputs:
chat_history:
type: list
question:
type: string
is_chat_input: true
default: What is ChatGPT?
outputs:
answer:
type: string
reference: ${chat_node.output}
is_chat_output: true
multi_answer:
type: string
reference: ${chat_node.output}
is_chat_output: true
nodes:
- inputs:
deployment_name: gpt-35-turbo
max_tokens: "256"
temperature: "0.7"
chat_history: ${inputs.chat_history}
question: ${inputs.question}
name: chat_node
type: llm
source:
type: code
path: chat.jinja2
api: chat
provider: AzureOpenAI
connection: azure_open_ai_connection | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/basic_with_builtin_llm_node/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
text:
type: string
default: Python Hello World!
outputs:
output:
type: string
reference: ${llm.output}
nodes:
- name: hello_prompt
type: prompt
inputs:
text: ${inputs.text}
source:
type: code
path: hello.jinja2
- name: llm
type: llm
inputs:
prompt: ${hello_prompt.output}
deployment_name: gpt-35-turbo
model: gpt-3.5-turbo
max_tokens: '120'
source:
type: code
path: hello.jinja2
connection: azure_open_ai_connection
api: chat
node_variants: {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/basic_with_builtin_llm_node/hello.jinja2 | system:
You are a assistant which can write code. Response should only contain code.
user:
Write a simple {{text}} program that displays the greeting message when executed. | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_custom_connection/hello.py | from promptflow import tool
from promptflow.connections import CustomConnection
@tool
def my_python_tool(text: str, connection: CustomConnection) -> dict:
    """Serialize the custom connection to a dict; `text` is accepted but unused."""
    serialized = connection._to_dict()
    return serialized
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_custom_connection/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: object
reference: ${hello_node.output}
nodes:
- inputs:
text: ${inputs.text}
connection: basic_custom_connection
name: hello_node
type: python
source:
type: code
path: hello.py
node_variants: {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_strong_type_connection/data.jsonl | {"text": "Hello World!"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_strong_type_connection/flow.dag.yaml | inputs:
text:
type: string
default: Hello!
outputs:
out:
type: string
reference: ${My_First_Tool_00f8.output}
nodes:
- name: My_Second_Tool_usi3
type: python
source:
type: package
tool: my_tool_package.tools.my_tool_2.MyTool.my_tool
inputs:
connection: custom_strong_type_connection
input_text: ${inputs.text}
- name: My_First_Tool_00f8
type: python
source:
type: package
tool: my_tool_package.tools.my_tool_1.my_tool
inputs:
connection: custom_strong_type_connection
input_text: ${My_Second_Tool_usi3.output}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/meta_files/samples.json | [
{
"line_number": 0,
"variant_id": "variant_0",
"groundtruth": "App",
"prediction": "App"
}
]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/meta_files/remote_fs.meta.yaml | $schema: https://azuremlschemas.azureedge.net/latest/flow.schema.json
name: classification_accuracy_eval
type: evaluate
path: azureml://datastores/workspaceworkingdirectory/paths/Users/wanhan/my_flow_snapshot/flow.dag.yaml
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/meta_files/remote_flow_short_path.meta.yaml | $schema: https://azuremlschemas.azureedge.net/latest/flow.schema.json
name: classification_accuracy_eval
display_name: Classification Accuracy Evaluation
type: evaluate
path: azureml://datastores/workspaceworkingdirectory/paths/Users/wanhan/a/flow.dag.yaml
description: Measuring the performance of a classification system by comparing its outputs to groundtruth.
properties:
promptflow.stage: prod
promptflow.details.type: markdown
promptflow.details.source: README.md
promptflow.batch_inputs: samples.json
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/meta_files/flow.dag.yaml | inputs:
line_number:
type: int
variant_id:
type: string
groundtruth:
type: string
description: Please specify the groundtruth column, which contains the true label
to the outputs that your flow produces.
prediction:
type: string
description: Please specify the prediction column, which contains the predicted
outputs that your flow produces.
outputs:
grade:
type: string
reference: ${grade.output}
nodes:
- name: grade
type: python
source:
type: code
path: grade.py
inputs:
groundtruth: ${inputs.groundtruth}
prediction: ${inputs.prediction}
- name: calculate_accuracy
type: python
source:
type: code
path: calculate_accuracy.py
inputs:
grades: ${grade.output}
variant_ids: ${inputs.variant_id}
aggregation: true
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/meta_files/flow.meta.yaml | $schema: https://azuremlschemas.azureedge.net/latest/flow.schema.json
name: web_classificiation_flow_3
display_name: Web Classification
type: standard
description: Create flows that use large language models to classify URLs into multiple categories.
path: ./flow.dag.yaml
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n/two/mod_two.py | from promptflow import tool
@tool
def mod_two(number: int):
    """Pass an even number through as {"value": number}; raise for odd input."""
    if number % 2 == 0:
        return {"value": number}
    raise Exception("cannot mod 2!")
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n/two/flow.dag.yaml | inputs:
number:
type: int
outputs:
output:
type: int
reference: ${mod_two.output.value}
nodes:
- name: mod_two
type: python
source:
type: code
path: mod_two.py
inputs:
number: ${inputs.number}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n/two | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n/two/.promptflow/flow.tools.json | {
"code": {
"mod_two.py": {
"type": "python",
"inputs": {
"number": {
"type": [
"int"
]
}
},
"source": "mod_two.py",
"function": "mod_two"
}
},
"package": {
"promptflow.tools.aoai_gpt4v.AzureOpenAI.chat": {
"name": "Azure OpenAI GPT-4 Turbo with Vision",
"description": "Use Azure OpenAI GPT-4 Turbo with Vision to leverage AOAI vision ability.",
"type": "custom_llm",
"module": "promptflow.tools.aoai_gpt4v",
"class_name": "AzureOpenAI",
"function": "chat",
"tool_state": "preview",
"icon": {
"light": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg==",
"dark": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC"
},
"default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"inputs": {
"connection": {
"type": [
"AzureOpenAIConnection"
]
},
"deployment_name": {
"type": [
"string"
]
},
"temperature": {
"default": 1,
"type": [
"double"
]
},
"top_p": {
"default": 1,
"type": [
"double"
]
},
"max_tokens": {
"default": 512,
"type": [
"int"
]
},
"stop": {
"default": "",
"type": [
"list"
]
},
"presence_penalty": {
"default": 0,
"type": [
"double"
]
},
"frequency_penalty": {
"default": 0,
"type": [
"double"
]
}
},
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.azure_content_safety.analyze_text": {
"module": "promptflow.tools.azure_content_safety",
"function": "analyze_text",
"inputs": {
"connection": {
"type": [
"AzureContentSafetyConnection"
]
},
"hate_category": {
"default": "medium_sensitivity",
"enum": [
"disable",
"low_sensitivity",
"medium_sensitivity",
"high_sensitivity"
],
"type": [
"string"
]
},
"self_harm_category": {
"default": "medium_sensitivity",
"enum": [
"disable",
"low_sensitivity",
"medium_sensitivity",
"high_sensitivity"
],
"type": [
"string"
]
},
"sexual_category": {
"default": "medium_sensitivity",
"enum": [
"disable",
"low_sensitivity",
"medium_sensitivity",
"high_sensitivity"
],
"type": [
"string"
]
},
"text": {
"type": [
"string"
]
},
"violence_category": {
"default": "medium_sensitivity",
"enum": [
"disable",
"low_sensitivity",
"medium_sensitivity",
"high_sensitivity"
],
"type": [
"string"
]
}
},
"name": "Content Safety (Text Analyze)",
"description": "Use Azure Content Safety to detect harmful content.",
"type": "python",
"deprecated_tools": [
"content_safety_text.tools.content_safety_text_tool.analyze_text"
],
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.embedding.embedding": {
"name": "Embedding",
"description": "Use Open AI's embedding model to create an embedding vector representing the input text.",
"type": "python",
"module": "promptflow.tools.embedding",
"function": "embedding",
"inputs": {
"connection": {
"type": [
"AzureOpenAIConnection",
"OpenAIConnection"
]
},
"deployment_name": {
"type": [
"string"
],
"enabled_by": "connection",
"enabled_by_type": [
"AzureOpenAIConnection"
],
"capabilities": {
"completion": false,
"chat_completion": false,
"embeddings": true
},
"model_list": [
"text-embedding-ada-002",
"text-search-ada-doc-001",
"text-search-ada-query-001"
]
},
"model": {
"type": [
"string"
],
"enabled_by": "connection",
"enabled_by_type": [
"OpenAIConnection"
],
"enum": [
"text-embedding-ada-002",
"text-search-ada-doc-001",
"text-search-ada-query-001"
],
"allow_manual_entry": true
},
"input": {
"type": [
"string"
]
}
},
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.openai_gpt4v.OpenAI.chat": {
"name": "OpenAI GPT-4V",
"description": "Use OpenAI GPT-4V to leverage vision ability.",
"type": "custom_llm",
"module": "promptflow.tools.openai_gpt4v",
"class_name": "OpenAI",
"function": "chat",
"tool_state": "preview",
"icon": {
"light": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg==",
"dark": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC"
},
"default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"inputs": {
"connection": {
"type": [
"OpenAIConnection"
]
},
"model": {
"enum": [
"gpt-4-vision-preview"
],
"allow_manual_entry": true,
"type": [
"string"
]
},
"temperature": {
"default": 1,
"type": [
"double"
]
},
"top_p": {
"default": 1,
"type": [
"double"
]
},
"max_tokens": {
"default": 512,
"type": [
"int"
]
},
"stop": {
"default": "",
"type": [
"list"
]
},
"presence_penalty": {
"default": 0,
"type": [
"double"
]
},
"frequency_penalty": {
"default": 0,
"type": [
"double"
]
}
},
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.open_model_llm.OpenModelLLM.call": {
"name": "Open Model LLM",
"description": "Use an open model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.",
"icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"type": "custom_llm",
"module": "promptflow.tools.open_model_llm",
"class_name": "OpenModelLLM",
"function": "call",
"inputs": {
"endpoint_name": {
"type": [
"string"
],
"dynamic_list": {
"func_path": "promptflow.tools.open_model_llm.list_endpoint_names"
},
"allow_manual_entry": true,
"is_multi_select": false
},
"deployment_name": {
"default": "",
"type": [
"string"
],
"dynamic_list": {
"func_path": "promptflow.tools.open_model_llm.list_deployment_names",
"func_kwargs": [
{
"name": "endpoint",
"type": [
"string"
],
"optional": true,
"reference": "${inputs.endpoint}"
}
]
},
"allow_manual_entry": true,
"is_multi_select": false
},
"api": {
"enum": [
"chat",
"completion"
],
"type": [
"string"
]
},
"temperature": {
"default": 1.0,
"type": [
"double"
]
},
"max_new_tokens": {
"default": 500,
"type": [
"int"
]
},
"top_p": {
"default": 1.0,
"advanced": true,
"type": [
"double"
]
},
"model_kwargs": {
"default": "{}",
"advanced": true,
"type": [
"object"
]
}
},
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.serpapi.SerpAPI.search": {
"name": "Serp API",
"description": "Use Serp API to obtain search results from a specific search engine.",
"inputs": {
"connection": {
"type": [
"SerpConnection"
]
},
"engine": {
"default": "google",
"enum": [
"google",
"bing"
],
"type": [
"string"
]
},
"location": {
"default": "",
"type": [
"string"
]
},
"num": {
"default": "10",
"type": [
"int"
]
},
"query": {
"type": [
"string"
]
},
"safe": {
"default": "off",
"enum": [
"active",
"off"
],
"type": [
"string"
]
}
},
"type": "python",
"module": "promptflow.tools.serpapi",
"class_name": "SerpAPI",
"function": "search",
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"my_tool_package.tools.my_tool_1.my_tool": {
"function": "my_tool",
"inputs": {
"connection": {
"type": [
"CustomConnection"
],
"custom_type": [
"MyFirstConnection",
"MySecondConnection"
]
},
"input_text": {
"type": [
"string"
]
}
},
"module": "my_tool_package.tools.my_tool_1",
"name": "My First Tool",
"description": "This is my first tool",
"type": "python",
"package": "test-custom-tools",
"package_version": "0.0.2"
},
"my_tool_package.tools.my_tool_2.MyTool.my_tool": {
"class_name": "MyTool",
"function": "my_tool",
"inputs": {
"connection": {
"type": [
"CustomConnection"
],
"custom_type": [
"MySecondConnection"
]
},
"input_text": {
"type": [
"string"
]
}
},
"module": "my_tool_package.tools.my_tool_2",
"name": "My Second Tool",
"description": "This is my second tool",
"type": "python",
"package": "test-custom-tools",
"package_version": "0.0.2"
},
"my_tool_package.tools.my_tool_with_custom_strong_type_connection.my_tool": {
"function": "my_tool",
"inputs": {
"connection": {
"custom_type": [
"MyCustomConnection"
],
"type": [
"CustomConnection"
]
},
"input_param": {
"type": [
"string"
]
}
},
"module": "my_tool_package.tools.my_tool_with_custom_strong_type_connection",
"name": "Tool With Custom Strong Type Connection",
"description": "This is my tool with custom strong type connection.",
"type": "python",
"package": "test-custom-tools",
"package_version": "0.0.2"
}
}
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n/three/flow.dag.yaml | inputs:
number:
type: int
outputs:
output:
type: int
reference: ${mod_three.output.value}
nodes:
- name: mod_three
type: python
source:
type: code
path: mod_three.py
inputs:
number: ${inputs.number}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n/three/mod_three.py | from promptflow import tool
@tool
def mod_three(number: int):
    """Return ``{"value": number}`` when *number* is a multiple of 3.

    Any other input raises a generic ``Exception`` — callers in the flow
    treat that failure as the signal that the line cannot "mod 3".
    """
    remainder = number % 3
    if remainder != 0:
        raise Exception("cannot mod 3!")
    return {"value": number}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n/three | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/mod-n/three/.promptflow/flow.tools.json | {
"code": {
"mod_three.py": {
"type": "python",
"inputs": {
"number": {
"type": [
"int"
]
}
},
"source": "mod_three.py",
"function": "mod_three"
}
},
"package": {
"promptflow.tools.aoai_gpt4v.AzureOpenAI.chat": {
"name": "Azure OpenAI GPT-4 Turbo with Vision",
"description": "Use Azure OpenAI GPT-4 Turbo with Vision to leverage AOAI vision ability.",
"type": "custom_llm",
"module": "promptflow.tools.aoai_gpt4v",
"class_name": "AzureOpenAI",
"function": "chat",
"tool_state": "preview",
"icon": {
"light": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg==",
"dark": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC"
},
"default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"inputs": {
"connection": {
"type": [
"AzureOpenAIConnection"
]
},
"deployment_name": {
"type": [
"string"
]
},
"temperature": {
"default": 1,
"type": [
"double"
]
},
"top_p": {
"default": 1,
"type": [
"double"
]
},
"max_tokens": {
"default": 512,
"type": [
"int"
]
},
"stop": {
"default": "",
"type": [
"list"
]
},
"presence_penalty": {
"default": 0,
"type": [
"double"
]
},
"frequency_penalty": {
"default": 0,
"type": [
"double"
]
}
},
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.azure_content_safety.analyze_text": {
"module": "promptflow.tools.azure_content_safety",
"function": "analyze_text",
"inputs": {
"connection": {
"type": [
"AzureContentSafetyConnection"
]
},
"hate_category": {
"default": "medium_sensitivity",
"enum": [
"disable",
"low_sensitivity",
"medium_sensitivity",
"high_sensitivity"
],
"type": [
"string"
]
},
"self_harm_category": {
"default": "medium_sensitivity",
"enum": [
"disable",
"low_sensitivity",
"medium_sensitivity",
"high_sensitivity"
],
"type": [
"string"
]
},
"sexual_category": {
"default": "medium_sensitivity",
"enum": [
"disable",
"low_sensitivity",
"medium_sensitivity",
"high_sensitivity"
],
"type": [
"string"
]
},
"text": {
"type": [
"string"
]
},
"violence_category": {
"default": "medium_sensitivity",
"enum": [
"disable",
"low_sensitivity",
"medium_sensitivity",
"high_sensitivity"
],
"type": [
"string"
]
}
},
"name": "Content Safety (Text Analyze)",
"description": "Use Azure Content Safety to detect harmful content.",
"type": "python",
"deprecated_tools": [
"content_safety_text.tools.content_safety_text_tool.analyze_text"
],
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.embedding.embedding": {
"name": "Embedding",
"description": "Use Open AI's embedding model to create an embedding vector representing the input text.",
"type": "python",
"module": "promptflow.tools.embedding",
"function": "embedding",
"inputs": {
"connection": {
"type": [
"AzureOpenAIConnection",
"OpenAIConnection"
]
},
"deployment_name": {
"type": [
"string"
],
"enabled_by": "connection",
"enabled_by_type": [
"AzureOpenAIConnection"
],
"capabilities": {
"completion": false,
"chat_completion": false,
"embeddings": true
},
"model_list": [
"text-embedding-ada-002",
"text-search-ada-doc-001",
"text-search-ada-query-001"
]
},
"model": {
"type": [
"string"
],
"enabled_by": "connection",
"enabled_by_type": [
"OpenAIConnection"
],
"enum": [
"text-embedding-ada-002",
"text-search-ada-doc-001",
"text-search-ada-query-001"
],
"allow_manual_entry": true
},
"input": {
"type": [
"string"
]
}
},
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.openai_gpt4v.OpenAI.chat": {
"name": "OpenAI GPT-4V",
"description": "Use OpenAI GPT-4V to leverage vision ability.",
"type": "custom_llm",
"module": "promptflow.tools.openai_gpt4v",
"class_name": "OpenAI",
"function": "chat",
"tool_state": "preview",
"icon": {
"light": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg==",
"dark": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC"
},
"default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n",
"inputs": {
"connection": {
"type": [
"OpenAIConnection"
]
},
"model": {
"enum": [
"gpt-4-vision-preview"
],
"allow_manual_entry": true,
"type": [
"string"
]
},
"temperature": {
"default": 1,
"type": [
"double"
]
},
"top_p": {
"default": 1,
"type": [
"double"
]
},
"max_tokens": {
"default": 512,
"type": [
"int"
]
},
"stop": {
"default": "",
"type": [
"list"
]
},
"presence_penalty": {
"default": 0,
"type": [
"double"
]
},
"frequency_penalty": {
"default": 0,
"type": [
"double"
]
}
},
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.open_model_llm.OpenModelLLM.call": {
"name": "Open Model LLM",
"description": "Use an open model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.",
"icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"type": "custom_llm",
"module": "promptflow.tools.open_model_llm",
"class_name": "OpenModelLLM",
"function": "call",
"inputs": {
"endpoint_name": {
"type": [
"string"
],
"dynamic_list": {
"func_path": "promptflow.tools.open_model_llm.list_endpoint_names"
},
"allow_manual_entry": true,
"is_multi_select": false
},
"deployment_name": {
"default": "",
"type": [
"string"
],
"dynamic_list": {
"func_path": "promptflow.tools.open_model_llm.list_deployment_names",
"func_kwargs": [
{
"name": "endpoint",
"type": [
"string"
],
"optional": true,
"reference": "${inputs.endpoint}"
}
]
},
"allow_manual_entry": true,
"is_multi_select": false
},
"api": {
"enum": [
"chat",
"completion"
],
"type": [
"string"
]
},
"temperature": {
"default": 1.0,
"type": [
"double"
]
},
"max_new_tokens": {
"default": 500,
"type": [
"int"
]
},
"top_p": {
"default": 1.0,
"advanced": true,
"type": [
"double"
]
},
"model_kwargs": {
"default": "{}",
"advanced": true,
"type": [
"object"
]
}
},
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"promptflow.tools.serpapi.SerpAPI.search": {
"name": "Serp API",
"description": "Use Serp API to obtain search results from a specific search engine.",
"inputs": {
"connection": {
"type": [
"SerpConnection"
]
},
"engine": {
"default": "google",
"enum": [
"google",
"bing"
],
"type": [
"string"
]
},
"location": {
"default": "",
"type": [
"string"
]
},
"num": {
"default": "10",
"type": [
"int"
]
},
"query": {
"type": [
"string"
]
},
"safe": {
"default": "off",
"enum": [
"active",
"off"
],
"type": [
"string"
]
}
},
"type": "python",
"module": "promptflow.tools.serpapi",
"class_name": "SerpAPI",
"function": "search",
"package": "promptflow-tools",
"package_version": "1.0.2"
},
"my_tool_package.tools.my_tool_1.my_tool": {
"function": "my_tool",
"inputs": {
"connection": {
"type": [
"CustomConnection"
],
"custom_type": [
"MyFirstConnection",
"MySecondConnection"
]
},
"input_text": {
"type": [
"string"
]
}
},
"module": "my_tool_package.tools.my_tool_1",
"name": "My First Tool",
"description": "This is my first tool",
"type": "python",
"package": "test-custom-tools",
"package_version": "0.0.2"
},
"my_tool_package.tools.my_tool_2.MyTool.my_tool": {
"class_name": "MyTool",
"function": "my_tool",
"inputs": {
"connection": {
"type": [
"CustomConnection"
],
"custom_type": [
"MySecondConnection"
]
},
"input_text": {
"type": [
"string"
]
}
},
"module": "my_tool_package.tools.my_tool_2",
"name": "My Second Tool",
"description": "This is my second tool",
"type": "python",
"package": "test-custom-tools",
"package_version": "0.0.2"
},
"my_tool_package.tools.my_tool_with_custom_strong_type_connection.my_tool": {
"function": "my_tool",
"inputs": {
"connection": {
"custom_type": [
"MyCustomConnection"
],
"type": [
"CustomConnection"
]
},
"input_param": {
"type": [
"string"
]
}
},
"module": "my_tool_package.tools.my_tool_with_custom_strong_type_connection",
"name": "Tool With Custom Strong Type Connection",
"description": "This is my tool with custom strong type connection.",
"type": "python",
"package": "test-custom-tools",
"package_version": "0.0.2"
}
}
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/inputs.json | {
"text": "hello"
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/pass_through.py | from promptflow import tool
@tool
def pass_through(input1: str) -> str:
    """Return the input text prefixed with 'hello '."""
    return f"hello {input1}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/expected_result.json | [
{
"expected_node_count": 3,
"expected_outputs": {
"output": "Node A not executed. Node B not executed."
},
"expected_bypassed_nodes": [
"nodeA",
"nodeB"
]
}
] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/flow.dag.yaml | inputs:
text:
type: string
default: hello
outputs:
output:
type: string
reference: ${nodeC.output}
nodes:
- name: nodeA
type: python
source:
type: code
path: pass_through.py
inputs:
input1: ${inputs.text}
activate:
when: ${inputs.text}
is: hi
- name: nodeB
type: python
source:
type: code
path: pass_through.py
inputs:
input1: ${inputs.text}
activate:
when: ${inputs.text}
is: hi
- name: nodeC
type: python
source:
type: code
path: summary_result.py
inputs:
input1: ${nodeA.output}
input2: ${nodeB.output}
activate:
when: dummy
is: dummy
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/summary_result.py | from promptflow import tool
@tool
def summary_result(input1: str = "Node A not executed.", input2: str = "Node B not executed.") -> str:
    """Combine the outputs of node A and node B into one summary string.

    Each parameter defaults to a placeholder message, so the summary still
    reads sensibly when an upstream node was bypassed and supplied no
    value (see the flow's activate conditions and expected_result.json).
    """
    return " ".join((input1, input2))
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image_without_default/pick_an_image.py | import random
from promptflow.contracts.multimedia import Image
from promptflow import tool
@tool
def pick_an_image(image_1: Image, image_2: Image) -> Image:
    """Return one of the two input images, chosen uniformly at random."""
    # random.choice([True, False]) is kept exactly as before so the RNG
    # state advances identically to the original implementation.
    return image_1 if random.choice([True, False]) else image_2
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image_without_default/flow.dag.yaml | inputs:
image_1:
type: image
image_2:
type: image
outputs:
output:
type: image
reference: ${python_node.output}
nodes:
- name: python_node
type: python
source:
type: code
path: pick_an_image.py
inputs:
image_1: ${inputs.image_1}
image_2: ${inputs.image_2}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_invalid_default_value/pick_an_image.py | import random
from promptflow.contracts.multimedia import Image
from promptflow import tool
@tool
def pick_an_image(image_1: Image, image_2: Image) -> Image:
    """Randomly pick and return either image_1 or image_2 (50/50 odds)."""
    # Same RNG call as the original so random-state consumption matches.
    take_first = random.choice([True, False])
    if take_first:
        return image_1
    return image_2
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_invalid_default_value/flow.dag.yaml | inputs:
image:
type: image
default: ""
outputs:
output:
type: image
reference: ${python_node_2.output}
nodes:
- name: python_node
type: python
source:
type: code
path: pick_an_image.py
inputs:
image_1: ${inputs.image}
image_2: logo_2.png
- name: python_node_2
type: python
source:
type: code
path: pick_an_image.py
inputs:
image_1: ${python_node.output}
image_2: logo_2.png
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_invalid_additional_include/flow.dag.yaml | inputs:
url:
type: string
default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
outputs:
category:
type: string
reference: ${convert_to_dict.output.category}
evidence:
type: string
reference: ${convert_to_dict.output.evidence}
nodes:
- name: fetch_text_content_from_url
type: python
source:
type: code
path: fetch_text_content_from_url.py
inputs:
url: ${inputs.url}
- name: summarize_text_content
type: llm
source:
type: code
path: summarize_text_content.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
use_variants: true
- name: prepare_examples
type: python
source:
type: code
path: prepare_examples.py
inputs: {}
- name: classify_with_llm
type: llm
source:
type: code
path: classify_with_llm.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
url: ${inputs.url}
examples: ${prepare_examples.output}
text_content: ${summarize_text_content.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
- name: convert_to_dict
type: python
source:
type: code
path: convert_to_dict.py
inputs:
input_str: ${classify_with_llm.output}
node_variants:
summarize_text_content:
default_variant_id: variant_1
variants:
variant_0:
node:
type: llm
source:
type: code
path: summarize_text_content.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
variant_1:
node:
type: llm
source:
type: code
path: summarize_text_content__variant_1.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '256'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
additional_includes:
- ../invalid/file/path | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/inputs.json | {
"text": "world"
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/expected_result.json | [
{
"expected_node_count": 2,
"expected_outputs":{
"text": "hello world"
},
"expected_bypassed_nodes":[]
}
] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/flow.dag.yaml | inputs:
text:
type: string
outputs:
text:
type: string
reference: ${node_a.output}
nodes:
- name: node_a
type: python
source:
type: code
path: node_a.py
inputs:
input1: ${inputs.text}
- name: node_b
type: python
source:
type: code
path: node_b.py
inputs: {}
activate:
when: ${node_a.output}
is: hello world
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/node_a.py | from promptflow import tool
@tool
def my_python_tool(input1: str) -> str:
    """Prefix the given text with 'hello ' and return the greeting."""
    return f"hello {input1}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/node_b.py | from promptflow import tool
@tool
def my_python_tool():
    """Marker tool used to confirm the node was activated.

    Logs an activation message and returns a fixed marker string; the
    flow's activate condition decides whether this node runs at all.
    """
    # Fixed typo in the log message ("Avtivate" -> "Activate"); the
    # return value, which callers consume, is unchanged.
    print("Activate")
    return 'Executing...'
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/echo.py | from promptflow import tool
@tool
def echo(message: str):
    """This tool is used to echo the message back.
    :param message: The message to echo.
    :type message: str
    """
    # NOTE: the docstring above is runtime data, not just documentation --
    # the assistant tool invoker parses it into the OpenAI function
    # description and parameter docs (asserted elsewhere in this repo's
    # tests), so its wording must stay exactly as-is.
    return message
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/assistant_definition.yaml | model: mock_model
instructions: mock_instructions
tools:
- type: function
tool_type: python
source:
type: code
path: echo.py
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/test_assistant_definition.py | from promptflow import tool
from promptflow.contracts.types import AssistantDefinition
@tool
def test_assistant_definition(message: str, assistant_definition: AssistantDefinition):
    """Verify an AssistantDefinition loaded from assistant_definition.yaml.

    Checks the model/instructions fields, confirms the tool invoker turns
    the echo tool into the expected OpenAI function definition, invokes the
    tool, and returns the serialized definition.

    :param message: Text forwarded to the echo tool; the invocation assert
        below only passes for the flow's default input "Hello World!".
    :param assistant_definition: Definition parsed by the executor from
        the YAML file referenced in flow.dag.yaml.
    """
    # Fields populated directly from assistant_definition.yaml.
    assert assistant_definition.model == "mock_model"
    assert assistant_definition.instructions == "mock_instructions"
    invoker = assistant_definition.init_tool_invoker()
    # The invoker converts the python tool (echo.py) into an OpenAI function
    # definition; description and parameter docs come from echo's docstring.
    openai_definition = invoker.to_openai_tools()
    assert len(openai_definition) == 1
    assert openai_definition[0]["function"]["description"] == "This tool is used to echo the message back."
    assert openai_definition[0]["function"]["parameters"]["properties"] == {
        "message": {"description": "The message to echo.", "type": "string"}
    }
    assert openai_definition[0]["function"]["parameters"]["required"] == ["message"]
    # NOTE(review): assumes message == "Hello World!" (the flow default);
    # any other input would fail this assertion.
    assert invoker.invoke_tool("echo", {"message": message}) == "Hello World!"
    return assistant_definition.serialize()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/flow.dag.yaml | inputs:
message:
type: string
default: Hello World!
outputs:
output:
type: object
reference: ${test_assistant_definition.output}
nodes:
- name: test_assistant_definition
type: python
source:
type: code
path: test_assistant_definition.py
inputs:
message: ${inputs.message}
assistant_definition: assistant_definition.yaml
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/samples.json | [{"idx": 1}, {"idx": 4}, {"idx": 10}] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/my_python_tool.py | from promptflow import tool
import random
@tool
def my_python_tool(idx: int) -> int:
    """Identity tool: pass the line index through unchanged."""
    result = idx
    return result
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/my_python_tool_with_failed_line.py | from promptflow import tool
import random
import time
@tool
def my_python_tool_with_failed_line(idx: int, mod=5) -> int:
    """Echo *idx*, except hang forever when idx is a multiple of *mod*.

    The endless sleep loop deliberately never returns; the bulk-test
    timeout machinery is expected to kill such lines (compare
    expected_status_summary.json for this flow).
    """
    if not idx % mod:
        # Simulate a stuck line: never terminate on our own.
        while True:
            time.sleep(60)
    return idx
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/samples_all_timeout.json | [{"idx": 5}, {"idx": 5}] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/expected_status_summary.json | {
"__pf__.nodes.my_python_tool.completed": 3,
"__pf__.nodes.my_python_tool_with_failed_line.completed": 2,
"__pf__.nodes.my_python_tool_with_failed_line.failed": 1,
"__pf__.lines.completed": 2,
"__pf__.lines.failed": 1
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/flow.dag.yaml | inputs:
idx:
type: int
outputs:
output:
type: int
reference: ${my_python_tool_with_failed_line.output}
nodes:
- name: my_python_tool
type: python
source:
type: code
path: my_python_tool.py
inputs:
idx: ${inputs.idx}
- name: my_python_tool_with_failed_line
type: python
source:
type: code
path: my_python_tool_with_failed_line.py
inputs:
idx: ${my_python_tool.output} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/all_depedencies_bypassed_with_activate_met/inputs.json | {
"text": "hi"
} | 0 |