repo_id
stringlengths 15
132
| file_path
stringlengths 34
176
| content
stringlengths 2
3.52M
| __index_level_0__
int64 0
0
|
---|---|---|---|
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/samples.json | [
{
"text": "text_1"
},
{
"text": "text_2"
},
{
"text": "text_3"
},
{
"text": "text_4"
}
] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/script_with___file__.py | from pathlib import Path
from promptflow import tool
print(f"The script is {__file__}")
assert Path(__file__).is_absolute(), f"__file__ should be absolute path, got {__file__}"
@tool
def my_python_tool(input1: str) -> str:
from pathlib import Path
assert Path(__file__).name == "script_with___file__.py"
assert __name__ == "__pf_main__"
print(f"Prompt: {input1} {__file__}")
return f"Prompt: {input1} {__file__}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/flow.dag.yaml | inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${node1.output}
nodes:
- name: node1
type: python
source:
type: code
path: script_with___file__.py
inputs:
input1: ${inputs.text}
- name: node2
type: python
source:
type: code
path: folder/another-tool.py
inputs:
input1: ${node1.output}
- name: node3
type: python
source:
type: code
path: folder/another-tool.py
inputs:
input1: random value | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with___file__ | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/folder/another-tool.py | from promptflow import tool
print(f"The script is {__file__}")
@tool
def my_python_tool(input1: str) -> str:
from pathlib import Path
assert Path(__file__).as_posix().endswith("folder/another-tool.py")
assert __name__ == "__pf_main__"
return f"Prompt: {input1} {__file__}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_list_input/flow.dag.yaml | inputs:
key:
type: list
outputs:
output:
type: string
reference: ${print_val.output.value}
nodes:
- name: print_val
type: python
source:
type: code
path: print_val.py
inputs:
key: ${inputs.key}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_list_input/print_val.py | from typing import List
from promptflow import tool
@tool
def get_val(key):
# get from env var
print(key)
return {"value": f"{key}: {type(key)}"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/generator_tools/echo.py | from promptflow import tool
from char_generator import character_generator
@tool
def echo(text):
"""Echo the input string."""
echo_text = "Echo - " + "".join(character_generator(text))
return echo_text | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/generator_tools/char_generator.py | from promptflow import tool
@tool
def character_generator(text: str):
"""Generate characters from a string."""
for char in text:
yield char | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/generator_tools/flow.dag.yaml | inputs:
text:
type: string
outputs:
answer:
type: string
reference: ${echo.output}
nodes:
- name: echo
type: python
source:
type: code
path: echo.py
inputs:
text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow-with-nan-inf/nan_inf.py | from promptflow import tool
@tool
def nan_inf(number: int):
print(number)
return {"nan": float("nan"), "inf": float("inf")}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow-with-nan-inf/flow.dag.yaml | inputs:
number:
type: int
outputs:
output:
type: object
reference: ${nan_inf.output}
nodes:
- name: nan_inf
type: python
source:
type: code
path: nan_inf.py
inputs:
number: ${inputs.number}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/README.md | Exported Dockerfile & its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files
- ...
- connections: the folder contains yaml files to create all related connections
- ...
- runit: the folder contains all the runit scripts
- ...
- Dockerfile: the dockerfile to build the image
- start.sh: the script used in `CMD` of `Dockerfile` to start the service
- settings.json: a json file to store the settings of the docker image
- README.md: the readme file to describe how to use the dockerfile
Please refer to [official doc](https://microsoft.github.io/promptflow/how-to-guides/deploy-and-export-a-flow.html#export-a-flow)
for more details about how to use the exported dockerfile and scripts.
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/Dockerfile | # syntax=docker/dockerfile:1
FROM docker.io/continuumio/miniconda3:latest
WORKDIR /
COPY ./flow /flow
# create conda environment
RUN conda create -n promptflow-serve python=3.9.16 pip=23.0.1 -q -y && \
conda run -n promptflow-serve \
pip install -r /flow/requirements_txt && \
conda run -n promptflow-serve pip install keyrings.alt && \
conda run -n promptflow-serve pip install gunicorn==20.1.0 && \
conda run -n promptflow-serve pip cache purge && \
conda clean -a -y
RUN apt-get update && apt-get install -y runit
EXPOSE 8080
COPY ./connections/* /connections/
# reset runsvdir
RUN rm -rf /var/runit
COPY ./runit /var/runit
# grant permission
RUN chmod -R +x /var/runit
COPY ./start.sh /
CMD ["bash", "./start.sh"] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/settings.json | {
"CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY": ""
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/start.sh | #!/bin/bash
# stop services created by runsv and propagate SIGINT, SIGTERM to child jobs
sv_stop() {
echo "$(date -uIns) - Stopping all runsv services"
for s in $(ls -d /var/runit/*); do
sv stop $s
done
}
# register SIGINT, SIGTERM handler
trap sv_stop SIGINT SIGTERM
# start services in background and wait all child jobs
runsvdir /var/runit &
wait
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit/promptflow-serve/run | #! /bin/bash
CONDA_ENV_PATH="$(conda info --base)/envs/promptflow-serve"
export PATH="$CONDA_ENV_PATH/bin:$PATH"
ls
ls /connections
pf connection create --file /connections/custom_connection.yaml
echo "start promptflow serving with worker_num: 8, worker_threads: 1"
cd /flow
gunicorn -w 8 --threads 1 -b "0.0.0.0:8080" --timeout 300 "promptflow._sdk._serving.app:create_app()" | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit/promptflow-serve/finish | #!/bin/bash
echo "$(date -uIns) - promptflow-serve/finish $@"
# stop all gunicorn processes
echo "$(date -uIns) - Stopping all Gunicorn processes"
pkill gunicorn
while pgrep gunicorn >/dev/null; do
echo "$(date -uIns) - Gunicorn process is still running, waiting for 1s"
sleep 1
done
echo "$(date -uIns) - Stopped all Gunicorn processes" | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/connections/custom_connection.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
type: custom
name: custom_connection
configs:
CHAT_DEPLOYMENT_NAME: gpt-35-turbo
AZURE_OPENAI_API_BASE: https://gpt-test-eus.openai.azure.com/
secrets:
AZURE_OPENAI_API_KEY: ${env:CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY}
module: promptflow.connections
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/user_intent_few_shot.jinja2 | You are given a list of orders with item_numbers from a customer and a statement from the customer. It is your job to identify
the intent that the customer has with their statement. Possible intents can be:
"product return", "product exchange", "general question", "product question", "other".
If the intent is product related ("product return", "product exchange", "product question"), then you should also
provide the order id and item that the customer is referring to in their statement.
For instance if you are give the following list of orders:
order_number: 2020230
date: 2023-04-23
store_location: SeattleStore
items:
- description: Roof Rack, color black, price $199.99
item_number: 101010
- description: Running Shoes, size 10, color blue, price $99.99
item_number: 202020
You are given the following customer statements:
- I am having issues with the jobbing shoes I bought.
Then you should answer with in valid yaml format with the fields intent, order_number, item, and item_number like so:
intent: product question
order_number: 2020230
descrption: Running Shoes, size 10, color blue, price $99.99
item_number: 202020
Here is the actual problem you need to solve:
In triple backticks below is the customer information and a list of orders.
```
{{customer_info}}
```
In triple backticks below are the is the chat history with customer statements and replies from the customer service agent:
```
{{chat_history}}
```
What is the customer's `intent:` here?
"product return", "exchange product", "general question", "product question" or "other"?
Reply with only the intent string.
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/user_intent_zero_shot.jinja2 | You are given a list of orders with item_numbers from a customer and a statement from the customer. It is your job to identify
the intent that the customer has with their statement. Possible intents can be:
"product return", "product exchange", "general question", "product question", "other".
In triple backticks below is the customer information and a list of orders.
```
{{customer_info}}
```
In triple backticks below are the is the chat history with customer statements and replies from the customer service agent:
```
{{chat_history}}
```
What is the customer's `intent:` here?
"product return", "exchange product", "general question", "product question" or "other"?
Reply with only the intent string.
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/requirements_txt | keyrings.alt
promptflow-tools
promptflow
langchain
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/intent.py | import os
import pip
def extract_intent(chat_prompt: str):
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage
if "AZURE_OPENAI_API_KEY" not in os.environ:
# load environment variables from .env file
try:
from dotenv import load_dotenv
except ImportError:
# This can be removed if user using custom image.
pip.main(["install", "python-dotenv"])
from dotenv import load_dotenv
load_dotenv()
chat = AzureChatOpenAI(
deployment_name=os.environ["CHAT_DEPLOYMENT_NAME"],
openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
openai_api_base=os.environ["AZURE_OPENAI_API_BASE"],
openai_api_type="azure",
openai_api_version="2023-03-15-preview",
temperature=0,
)
reply_message = chat([HumanMessage(content=chat_prompt)])
return reply_message.content
def generate_prompt(customer_info: str, history: list, user_prompt_template: str):
from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.prompts.prompt import PromptTemplate
chat_history_text = "\n".join(
[message["role"] + ": " + message["content"] for message in history]
)
prompt_template = PromptTemplate.from_template(user_prompt_template)
chat_prompt_template = ChatPromptTemplate.from_messages(
[
HumanMessagePromptTemplate(prompt=prompt_template)
]
)
return chat_prompt_template.format_prompt(customer_info=customer_info, chat_history=chat_history_text).to_string()
if __name__ == "__main__":
import json
with open("./data/denormalized-flat.jsonl", "r") as f:
data = [json.loads(line) for line in f.readlines()]
# only ten samples
data = data[:10]
# load template from file
with open("user_intent_zero_shot.md", "r") as f:
user_prompt_template = f.read()
# each test
for item in data:
chat_prompt = generate_prompt(item["customer_info"], item["history"], user_prompt_template)
reply = extract_intent(chat_prompt)
print("=====================================")
# print("Customer info: ", item["customer_info"])
# print("+++++++++++++++++++++++++++++++++++++")
print("Chat history: ", item["history"])
print("+++++++++++++++++++++++++++++++++++++")
print(reply)
print("+++++++++++++++++++++++++++++++++++++")
print(f"Ground Truth: {item['intent']}")
print("=====================================")
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/setup.sh | echo Hello Promptflow!
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/.amlignore | *.ipynb
.venv/
.data/
.env
.vscode/
outputs/
connection.json
.gitignore
README.md
eval_cli.md
data/
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/extract_intent_tool.py | import os
from promptflow import tool
from promptflow.connections import CustomConnection
from intent import extract_intent
@tool
def extract_intent_tool(
chat_prompt,
connection: CustomConnection) -> str:
# set environment variables
for key, value in dict(connection).items():
os.environ[key] = value
# call the entry function
return extract_intent(
chat_prompt=chat_prompt,
) | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/flow.dag.yaml | inputs:
customer_info:
type: string
chat_history:
type: string
outputs:
output:
type: string
reference: ${extract_intent.output}
nodes:
- name: chat_prompt
type: prompt
source:
type: code
path: user_intent_zero_shot.jinja2
inputs: # Please check the generated prompt inputs
customer_info: ${inputs.customer_info}
chat_history: ${inputs.chat_history}
- name: extract_intent
type: python
source:
type: code
path: extract_intent_tool.py
inputs:
chat_prompt: ${chat_prompt.output}
connection: custom_connection
environment:
python_requirements_txt: requirements_txt
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/llm_tool_with_duplicated_inputs/prompt_with_duplicated_inputs.jinja2 | {{prompt}} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/llm_tool_with_duplicated_inputs/flow.dag.yaml | inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${llm_tool_with_duplicated_inputs.output}
nodes:
- name: llm_tool_with_duplicated_inputs
type: llm
provider: AzureOpenAI
api: completion
module: promptflow.tools.aoai
connection: azure_open_ai_connection
source:
type: code
path: prompt_with_duplicated_inputs.jinja2
inputs:
deployment_name: text-ada-001
max_tokens: 16
text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment_variables/inputs.jsonl | {"text": "env1"}
{"text": "env2"}
{"text": "env3"}
{"text": "env4"}
{"text": "env5"}
{"text": "env10"} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment_variables/flow.dag.yaml | environment_variables:
env1: 2
env2: spawn
env3:
- 1
- 2
- 3
- 4
- 5
env4:
a: 1
b: "2"
inputs:
key:
type: string
outputs:
output:
type: string
reference: ${print_env.output.value}
nodes:
- name: print_env
type: python
source:
type: code
path: print_env.py
inputs:
key: ${inputs.key}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_environment_variables/print_env.py | import os
from promptflow import tool
@tool
def get_env_var(key: str):
print(os.environ.get(key))
# get from env var
return {"value": os.environ.get(key)}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/stream.py | from promptflow import tool
from typing import Generator, List
def stream(question: str) -> Generator[str, None, None]:
for word in question:
yield word
@tool
def my_python_tool(chat_history: List[dict], question: str) -> dict:
return {"answer": stream(question)}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/flow.dag.yaml | inputs:
chat_history:
type: list
is_chat_history: true
question:
type: string
is_chat_input: true
outputs:
answer:
type: string
reference: ${stream.output.answer}
is_chat_output: true
nodes:
- name: stream
type: python
source:
type: code
path: stream.py
inputs:
chat_history: ${inputs.chat_history}
question: ${inputs.question} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_additional_include/samples.json | [
{
"line_number": 0,
"variant_id": "variant_0",
"groundtruth": "App",
"prediction": "App"
}
]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_additional_include/classify_with_llm.jinja2 | system:
Your task is to classify a given url into one of the following types:
Movie, App, Academic, Channel, Profile, PDF or None based on the text content information.
The classification will be based on the url, the webpage text content summary, or both.
user:
Here are a few examples:
{% for ex in examples %}
URL: {{ex.url}}
Text content: {{ex.text_content}}
OUTPUT:
{"category": "{{ex.category}}", "evidence": "{{ex.evidence}}"}
{% endfor %}
For a given URL : {{url}}, and text content: {{text_content}}.
Classify above url to complete the category and indicate evidence.
OUTPUT:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_additional_include/summarize_text_content__variant_1.jinja2 | system:
Please summarize some keywords of this paragraph and have some details of each keywords.
Do not add any information that is not in the text.
user:
Text: {{text}}
Summary:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_additional_include/prepare_examples.py | from pathlib import Path
from promptflow import tool
# read file from additional includes
lines = open(r"fetch_text_content_from_url.py", "r").readlines()
@tool
def prepare_examples():
if not Path("summarize_text_content.jinja2").exists():
raise Exception("Cannot find summarize_text_content.jinja2")
return [
{
"url": "https://play.google.com/store/apps/details?id=com.spotify.music",
"text_content": "Spotify is a free music and podcast streaming app with millions of songs, albums, and original podcasts. It also offers audiobooks, so users can enjoy thousands of stories. It has a variety of features such as creating and sharing music playlists, discovering new music, and listening to popular and exclusive podcasts. It also has a Premium subscription option which allows users to download and listen offline, and access ad-free music. It is available on all devices and has a variety of genres and artists to choose from.",
"category": "App",
"evidence": "Both",
},
{
"url": "https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw",
"text_content": "NFL Sunday Ticket is a service offered by Google LLC that allows users to watch NFL games on YouTube. It is available in 2023 and is subject to the terms and privacy policy of Google LLC. It is also subject to YouTube's terms of use and any applicable laws.",
"category": "Channel",
"evidence": "URL",
},
{
"url": "https://arxiv.org/abs/2303.04671",
"text_content": "Visual ChatGPT is a system that enables users to interact with ChatGPT by sending and receiving not only languages but also images, providing complex visual questions or visual editing instructions, and providing feedback and asking for corrected results. It incorporates different Visual Foundation Models and is publicly available. Experiments show that Visual ChatGPT opens the door to investigating the visual roles of ChatGPT with the help of Visual Foundation Models.",
"category": "Academic",
"evidence": "Text content",
},
{
"url": "https://ab.politiaromana.ro/",
"text_content": "There is no content available for this text.",
"category": "None",
"evidence": "None",
},
]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_additional_include/flow.dag.yaml | inputs:
url:
type: string
default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
outputs:
category:
type: string
reference: ${convert_to_dict.output.category}
evidence:
type: string
reference: ${convert_to_dict.output.evidence}
nodes:
- name: fetch_text_content_from_url
type: python
source:
type: code
path: fetch_text_content_from_url.py
inputs:
url: ${inputs.url}
- name: summarize_text_content
type: llm
source:
type: code
path: summarize_text_content.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: chat
module: promptflow.tools.aoai
use_variants: true
- name: prepare_examples
type: python
source:
type: code
path: prepare_examples.py
inputs: {}
- name: classify_with_llm
type: llm
source:
type: code
path: classify_with_llm.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
url: ${inputs.url}
examples: ${prepare_examples.output}
text_content: ${summarize_text_content.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: chat
module: promptflow.tools.aoai
- name: convert_to_dict
type: python
source:
type: code
path: convert_to_dict.py
inputs:
input_str: ${classify_with_llm.output}
node_variants:
summarize_text_content:
default_variant_id: variant_1
variants:
variant_0:
node:
type: llm
source:
type: code
path: summarize_text_content.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: chat
module: promptflow.tools.aoai
variant_1:
node:
type: llm
source:
type: code
path: summarize_text_content__variant_1.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '256'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: chat
module: promptflow.tools.aoai
additional_includes:
- ../external_files/convert_to_dict.py
- ../external_files/fetch_text_content_from_url.py
- ../external_files/summarize_text_content.jinja2
- ../external_files/summarize_text_content.jinja2
- ../external_files
- ../external_files
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/saved_component_spec/parallel.yaml | creation_context:
created_at: xxx
created_by: xxx
created_by_type: xxx
last_modified_at: xxx
last_modified_by: xxx
last_modified_by_type: xxx
description: Create flows that use large language models to classify URLs into multiple
categories.
display_name: web_classification_4
error_threshold: -1
id: azureml:/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/components/xxx/versions/xxx
input_data: ${{inputs.data}}
inputs:
connections.classify_with_llm.connection:
default: azure_open_ai_connection
optional: true
type: string
connections.classify_with_llm.deployment_name:
default: text-davinci-003
optional: true
type: string
connections.classify_with_llm.model:
enum:
- text-davinci-001
- text-davinci-002
- text-davinci-003
- text-curie-001
- text-babbage-001
- text-ada-001
- code-cushman-001
- code-davinci-002
optional: true
type: string
connections.summarize_text_content.connection:
default: azure_open_ai_connection
optional: true
type: string
connections.summarize_text_content.deployment_name:
default: text-davinci-003
optional: true
type: string
connections.summarize_text_content.model:
enum:
- text-davinci-001
- text-davinci-002
- text-davinci-003
- text-curie-001
- text-babbage-001
- text-ada-001
- code-cushman-001
- code-davinci-002
optional: true
type: string
data:
optional: false
type: uri_folder
run_outputs:
optional: true
type: uri_folder
url:
default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
optional: false
type: string
is_deterministic: true
logging_level: INFO
max_concurrency_per_instance: 1
mini_batch_error_threshold: 0
mini_batch_size: '1'
name: web_classification_4
outputs:
debug_info:
type: uri_folder
flow_outputs:
type: uri_folder
retry_settings:
max_retries: 2
timeout: 3600
task:
append_row_to: ${{outputs.flow_outputs}}
code: /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/codes/xxx/versions/xxx
entry_script: driver/azureml_user/parallel_run/prompt_flow_entry.py
environment: azureml:/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.MachineLearningServices/workspaces/xxx/environments/xxx/versions/xxx
program_arguments: --amlbi_pf_enabled True --amlbi_pf_run_mode component --amlbi_mini_batch_rows
1 --amlbi_file_format jsonl $[[--amlbi_pf_run_outputs ${{inputs.run_outputs}}]]
--amlbi_pf_debug_info ${{outputs.debug_info}} --amlbi_pf_connections "$[[classify_with_llm.connection=${{inputs.connections.classify_with_llm.connection}},]]$[[summarize_text_content.connection=${{inputs.connections.summarize_text_content.connection}},]]"
--amlbi_pf_deployment_names "$[[classify_with_llm.deployment_name=${{inputs.connections.classify_with_llm.deployment_name}},]]$[[summarize_text_content.deployment_name=${{inputs.connections.summarize_text_content.deployment_name}},]]"
--amlbi_pf_model_names "$[[classify_with_llm.model=${{inputs.connections.classify_with_llm.model}},]]$[[summarize_text_content.model=${{inputs.connections.summarize_text_content.model}},]]"
--amlbi_pf_input_url ${{inputs.url}}
type: run_function
type: parallel
version: 1.0.0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_sys_inject/hello.py | import os
import sys
from promptflow import tool
sys.path.append(f"{os.path.dirname(__file__)}/custom_lib")
from custom_lib.foo import foo
@tool
def my_python_tool(input1: str) -> str:
return foo(param=input1)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_sys_inject/flow.dag.yaml | inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${echo_my_prompt.output}
nodes:
- inputs:
input1: ${inputs.text}
name: echo_my_prompt
type: python
source:
type: code
path: hello.py
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_sys_inject | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_sys_inject/custom_lib/foo.py | def foo(param: str) -> str:
return f"{param} from func foo"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_image_nested_api_calls/passthrough.py | from promptflow import tool
@tool
def passthrough(image, call_passthrough: bool = True):
if call_passthrough:
image = passthrough(image, False)
return image
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_image_nested_api_calls/flow.dag.yaml | inputs:
image:
type: image
default: logo.jpg
outputs:
output:
type: image
reference: ${python_node.output}
nodes:
- name: python_node
type: python
source:
type: code
path: passthrough.py
inputs:
image: ${inputs.image}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/package_tools/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: string
reference: ${search_by_text.output.search_metadata}
nodes:
- name: search_by_text
type: python
source:
type: package
tool: promptflow.tools.serpapi.SerpAPI.search
inputs:
connection: serp_connection
query: ${inputs.text}
num: 1 | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/prompt_tools/samples.json | [
{
"text": "text_1"
},
{
"text": "text_2"
},
{
"text": "text_3"
},
{
"text": "text_4"
}
] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/prompt_tools/summarize_text_content_prompt.jinja2 | Please summarize the following content in one paragraph. 50 words.
Do not add any information that is not in the content.
Text: {{text}}
Images:
![image]({{image1}})
![ image]({{image2}})
![image ]({{image3}})
![ image ]({{image4}})
Video:
![video]({{video1}})
Summary: | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/prompt_tools/summarize_text_content_prompt.meta.json | {
"name": "summarize_text_content_prompt",
"type": "prompt",
"inputs": {
"text": {
"type": [
"string"
]
},
"image1": {
"type": [
"image"
]
},
"image2": {
"type": [
"image"
]
},
"image3": {
"type": [
"image"
]
},
"image4": {
"type": [
"image"
]
},
"video1": {
"type": [
"string"
]
}
},
"source": "summarize_text_content_prompt.jinja2"
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/prompt_tools/flow.dag.yaml | inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${summarize_text_content_prompt.output}
nodes:
- name: summarize_text_content_prompt
type: prompt
source:
type: code
path: summarize_text_content_prompt.jinja2
inputs:
text: ${inputs.text} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/print_input_flow/inputs.jsonl | {"text": "text_0"}
{"text": "text_1"}
{"text": "text_2"}
{"text": "text_3"}
{"text": "text_4"}
{"text": "text_5"}
{"text": "text_6"}
{"text": "text_7"}
{"text": "text_8"}
{"text": "text_9"} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/print_input_flow/print_input.py | from promptflow import tool
import sys
@tool
def print_inputs(text: str = None):
    """Echo *text* to both stdout and stderr, then pass it through unchanged.

    Used by tests that verify per-line log capture of a batch run.
    """
    print(f"STDOUT: {text}")
    print(f"STDERR: {text}", file=sys.stderr)
    return text
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/print_input_flow/flow.dag.yaml | inputs:
text:
type: string
outputs:
output_text:
type: string
reference: ${print_input.output}
nodes:
- name: print_input
type: python
source:
type: code
path: print_input.py
inputs:
text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_invalid_import/hello.py | import package_not_exist
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_invalid_import/flow.dag.yaml | inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${echo_my_prompt.output}
nodes:
- inputs:
text: ${inputs.text}
name: echo_my_prompt
type: python
source:
type: code
path: hello.py
node_variants: {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/unordered_nodes/flow.dag.yaml | name: node_wrong_order
inputs:
text:
type: string
outputs:
result:
type: string
reference: ${third_node}
nodes:
- name: third_node
type: python
source:
type: code
path: test.py
inputs:
text: ${second_node}
- name: first_node
type: python
source:
type: code
path: test.py
inputs:
text: ${inputs.text}
- name: second_node
type: python
source:
type: code
path: test.py
inputs:
text: ${first_node}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_connection/data.jsonl | {"text": "Hello World!"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_connection/flow.dag.yaml | inputs:
text:
type: string
default: Hello!
outputs:
out:
type: string
reference: ${my_first_tool.output}
nodes:
- name: my_first_tool
type: python
source:
type: package
tool: my_tool_package.tools.my_tool_1.my_tool
inputs:
connection: custom_connection_3
input_text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/async_fail.py | from promptflow import tool
async def raise_exception_async(s):
    """Always raise a plain Exception whose message embeds *s*."""
    raise Exception(f"In raise_exception_async: {s}")
@tool
async def raise_an_exception_async(s: str):
    """Await the failing helper and re-raise with this tool's own context chained."""
    try:
        await raise_exception_async(s)
    except Exception as inner:
        raise Exception(f"In tool raise_an_exception_async: {s}") from inner
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/async_tools_failures/flow.dag.yaml | inputs:
text:
type: string
default: dummy_input
outputs:
output_prompt:
type: string
reference: ${async_fail.output}
nodes:
- name: async_fail
type: python
source:
type: code
path: async_fail.py
inputs:
s: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_special_character/script_with_special_character.py | from promptflow import tool
@tool
def print_special_character(input1: str) -> str:
    """Return a fixed URL string; *input1* is accepted but ignored.

    NOTE(review): the comment below claims a special character is embedded to
    exercise file reading — it may be invisible in some editors, so confirm
    the literal still carries it before reformatting this file.
    """
    # Add special character to test if file read is working.
    return "https://www.bing.com//"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_special_character/script_with_special_character.meta.json | {
"name": "script_with_special_character",
"type": "python",
"inputs": {
"input1": {
"type": [
"string"
]
}
},
"source": "script_with_special_character.py",
"function": "print_special_character"
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool_and_aggregate/aggregate_num.py | import statistics
from typing import List
from promptflow import tool
@tool
def aggregate_num(num: List[int]) -> int:
    """Aggregation node: reduce the per-line values to their arithmetic mean.

    NOTE(review): statistics.mean returns a float (e.g. 1.5) for int inputs,
    so the declared ``-> int`` is not strictly accurate — confirm callers.
    """
    return statistics.mean(num)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool_and_aggregate/divide_num.py | from promptflow import tool
@tool
def divide_num(num: int) -> int:
    """Return *num* divided by two, truncated toward zero."""
    return int(num / 2)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool_and_aggregate/flow.dag.yaml | inputs:
num:
type: int
outputs:
content:
type: string
reference: ${divide_num.output}
aggregate_content:
type: string
reference: ${aggregate_num.output}
nodes:
- name: divide_num
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${inputs.num}
- name: aggregate_num
type: python
source:
type: code
path: aggregate_num.py
inputs:
num: ${divide_num.output}
aggregation: True
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/unordered_nodes_with_activate/flow.dag.yaml | name: node_wrong_order
inputs:
text:
type: string
skip:
type: bool
outputs:
result:
type: string
reference: ${third_node}
nodes:
- name: third_node
type: python
source:
type: code
path: test.py
inputs:
text: ${second_node}
- name: first_node
type: python
source:
type: code
path: test.py
inputs:
text: ${inputs.text}
- name: second_node
type: python
source:
type: code
path: test.py
inputs:
text: ${first_node}
activate:
when: ${inputs.skip}
is: true
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/partial_fail/data.jsonl | {"key": "no"}
{"key": "raise"}
{"key": "matter"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/partial_fail/flow.dag.yaml | inputs:
key:
type: string
outputs:
output:
type: string
reference: ${print_env.output.value}
nodes:
- name: print_env
type: python
source:
type: code
path: print_env.py
inputs:
key: ${inputs.key}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/partial_fail/print_env.py | import os
from promptflow import tool
@tool
def get_env_var(key: str):
    """Look up environment variable *key* and return it wrapped in a dict.

    Raises deliberately when key == "raise" so batch runs can exercise
    partial-failure handling (this tool lives under the "partial_fail"
    fixture). Missing variables yield {"value": None}.
    """
    if key == "raise":
        raise Exception("expected raise!")
    print(os.environ.get(key))
    # get from env var
    return {"value": os.environ.get(key)}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/samples.json | [
{
"line_number": 0,
"variant_id": "variant_0",
"groundtruth": "App",
"prediction": "App"
}
]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/convert_to_dict.py | import json
from promptflow import tool
@tool
def convert_to_dict(input_str: str):
    """Parse *input_str* as JSON; on any failure, log and return the default
    category/evidence dict so downstream nodes keep working."""
    try:
        parsed = json.loads(input_str)
    except Exception as err:
        print("input is not valid, error: {}".format(err))
        return {"category": "None", "evidence": "None"}
    return parsed
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/fetch_text_content_from_url.py | import bs4
import requests
from promptflow import tool
@tool
def fetch_text_content_from_url(url: str):
    """Fetch *url* and return the first 2000 characters of its visible text.

    Any failure — non-200 status, network error, parse error — degrades to
    the literal string "No available content" so the flow keeps running.
    """
    # Send a request to the URL
    try:
        # A browser-like User-Agent avoids trivial bot blocking.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup.
            # (Removed a stray `soup.prettify()` call: it returns a formatted
            # string that was discarded — pure wasted work, no side effects.)
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: {response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/classify_with_llm.jinja2 | Your task is to classify a given url into one of the following types:
Movie, App, Academic, Channel, Profile, PDF or None based on the text content information.
The classification will be based on the url, the webpage text content summary, or both.
Here are a few examples:
{% for ex in examples %}
URL: {{ex.url}}
Text content: {{ex.text_content}}
OUTPUT:
{"category": "{{ex.category}}", "evidence": "{{ex.evidence}}"}
{% endfor %}
For a given URL : {{url}}, and text content: {{text_content}}.
Classify above url to complete the category and indicate evidence.
OUTPUT:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/summarize_text_content__variant_1.jinja2 | Please summarize some keywords of this paragraph and have some details of each keywords.
Do not add any information that is not in the text.
Text: {{text}}
Summary:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/prepare_examples.py | from promptflow import tool
@tool
def prepare_examples():
    """Return static few-shot examples for the URL-classification prompt.

    Each example carries: url, text_content (a summary of the page),
    the expected category label, and which evidence supports it
    ("URL", "Text content", "Both", or "None").
    """
    return [
        {
            "url": "https://play.google.com/store/apps/details?id=com.spotify.music",
            "text_content": "Spotify is a free music and podcast streaming app with millions of songs, albums, and original podcasts. It also offers audiobooks, so users can enjoy thousands of stories. It has a variety of features such as creating and sharing music playlists, discovering new music, and listening to popular and exclusive podcasts. It also has a Premium subscription option which allows users to download and listen offline, and access ad-free music. It is available on all devices and has a variety of genres and artists to choose from.",
            "category": "App",
            "evidence": "Both",
        },
        {
            "url": "https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw",
            "text_content": "NFL Sunday Ticket is a service offered by Google LLC that allows users to watch NFL games on YouTube. It is available in 2023 and is subject to the terms and privacy policy of Google LLC. It is also subject to YouTube's terms of use and any applicable laws.",
            "category": "Channel",
            "evidence": "URL",
        },
        {
            "url": "https://arxiv.org/abs/2303.04671",
            "text_content": "Visual ChatGPT is a system that enables users to interact with ChatGPT by sending and receiving not only languages but also images, providing complex visual questions or visual editing instructions, and providing feedback and asking for corrected results. It incorporates different Visual Foundation Models and is publicly available. Experiments show that Visual ChatGPT opens the door to investigating the visual roles of ChatGPT with the help of Visual Foundation Models.",
            "category": "Academic",
            "evidence": "Text content",
        },
        {
            "url": "https://ab.politiaromana.ro/",
            "text_content": "There is no content available for this text.",
            "category": "None",
            "evidence": "None",
        },
    ]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/flow.dag.yaml | inputs:
url:
type: string
default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
outputs:
category:
type: string
reference: ${convert_to_dict.output.category}
evidence:
type: string
reference: ${convert_to_dict.output.evidence}
nodes:
- name: fetch_text_content_from_url
type: python
source:
type: code
path: fetch_text_content_from_url.py
inputs:
url: ${inputs.url}
- name: summarize_text_content
type: llm
source:
type: code
path: summarize_text_content.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
use_variants: true
- name: prepare_examples
type: python
source:
type: code
path: prepare_examples.py
inputs: {}
- name: classify_with_llm
type: llm
source:
type: code
path: classify_with_llm.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
url: ${inputs.url}
examples: ${prepare_examples.output}
text_content: ${summarize_text_content.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
- name: convert_to_dict
type: python
source:
type: code
path: convert_to_dict.py
inputs:
input_str: ${classify_with_llm.output}
node_variants:
summarize_text_content:
default_variant_id: variant_1
variants:
variant_0:
node:
type: llm
source:
type: code
path: summarize_text_content.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '128'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
variant_1:
node:
type: llm
source:
type: code
path: summarize_text_content__variant_1.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '256'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${fetch_text_content_from_url.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/summarize_text_content.jinja2 | Please summarize the following text in one paragraph. 100 words.
Do not add any information that is not in the text.
Text: {{text}}
Summary:
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2 | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/.promptflow/flow.tools.json | {
"package": {},
"code": {
"fetch_text_content_from_url.py": {
"type": "python",
"inputs": {
"url": {
"type": [
"string"
]
}
},
"function": "fetch_text_content_from_url"
},
"summarize_text_content.jinja2": {
"type": "llm",
"inputs": {
"text": {
"type": [
"string"
]
}
},
"description": "Summarize webpage content into a short paragraph."
},
"summarize_text_content__variant_1.jinja2": {
"type": "llm",
"inputs": {
"text": {
"type": [
"string"
]
}
}
},
"prepare_examples.py": {
"type": "python",
"function": "prepare_examples"
},
"classify_with_llm.jinja2": {
"type": "llm",
"inputs": {
"url": {
"type": [
"string"
]
},
"examples": {
"type": [
"string"
]
},
"text_content": {
"type": [
"string"
]
}
},
"description": "Multi-class classification of a given url and text content."
},
"convert_to_dict.py": {
"type": "python",
"inputs": {
"input_str": {
"type": [
"string"
]
}
},
"function": "convert_to_dict"
}
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/async_tools_with_sync_tools/sync_passthrough.py | from promptflow import tool
import time
@tool
def passthrough_str_and_wait_sync(input1: str, wait_seconds=3) -> str:
    """Return *input1* unchanged after sleeping *wait_seconds* seconds,
    printing the elapsed-second counter as it goes (synchronous variant)."""
    assert isinstance(input1, str), f"input1 should be a string, got {input1}"
    print(f"Wait for {wait_seconds} seconds in sync function")
    second = 0
    while second < wait_seconds:
        print(second)
        time.sleep(1)
        second += 1
    return input1
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/async_tools_with_sync_tools/flow.dag.yaml | inputs:
input_str:
type: string
default: Hello
outputs:
ouput1:
type: string
reference: ${async_passthrough1.output}
output2:
type: string
reference: ${sync_passthrough1.output}
nodes:
- name: async_passthrough
type: python
source:
type: code
path: async_passthrough.py
inputs:
input1: ${inputs.input_str}
wait_seconds: 1
- name: async_passthrough1
type: python
source:
type: code
path: async_passthrough.py
inputs:
input1: ${async_passthrough.output}
wait_seconds: 10
wait_seconds_in_cancellation: 1
- name: sync_passthrough1
type: python
source:
type: code
path: sync_passthrough.py
inputs:
input1: ${async_passthrough.output}
wait_seconds: 10
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/async_tools_with_sync_tools/async_passthrough.py | from promptflow import tool
import asyncio
@tool
async def passthrough_str_and_wait(input1: str, wait_seconds=3, wait_seconds_in_cancellation=1) -> str:
    """Return *input1* unchanged after an interruptible async wait.

    Sleeps for *wait_seconds* one second at a time. If the task is cancelled,
    spends *wait_seconds_in_cancellation* seconds in cleanup before
    re-raising, so tests can observe slow cancellation handling.
    """
    assert isinstance(input1, str), f"input1 should be a string, got {input1}"
    try:
        print(f"Wait for {wait_seconds} seconds in async function")
        for i in range(wait_seconds):
            print(i)
            await asyncio.sleep(1)
    except asyncio.CancelledError:
        print(f"Async function is cancelled, wait for {wait_seconds_in_cancellation}"
              " in cancellation process")
        for i in range(wait_seconds_in_cancellation):
            print(f"Wait for {i} seconds in async tool cancellation logic")
            await asyncio.sleep(1)
        # Fixed: dropped the extraneous f-prefix (no placeholders, ruff F541).
        print("End time consuming cancellation process")
        raise
    return input1
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/test_langchain_traces.py | import os
from langchain.chat_models import AzureChatOpenAI
from langchain_core.messages import HumanMessage
from langchain.agents.agent_types import AgentType
from langchain.agents.initialize import initialize_agent
from langchain.agents.load_tools import load_tools
from promptflow import tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.integrations.langchain import PromptFlowCallbackHandler
@tool
def test_langchain_traces(question: str, conn: AzureOpenAIConnection):
    """Answer *question* with a LangChain zero-shot math agent, wired to the
    PromptFlow callback handler so the agent's steps appear as traces.

    Credentials from *conn* are exported as environment variables before the
    model is constructed (AzureChatOpenAI reads them from the environment).
    Returns the agent's answer, or the error message as a string on failure.
    """
    os.environ["AZURE_OPENAI_API_KEY"] = conn.api_key
    os.environ["OPENAI_API_VERSION"] = conn.api_version
    os.environ["AZURE_OPENAI_ENDPOINT"] = conn.api_base
    model = AzureChatOpenAI(
        temperature=0.7,
        azure_deployment="gpt-35-turbo",
    )
    tools = load_tools(["llm-math"], llm=model)
    # Please keep use agent to enable customized CallBack handler
    agent = initialize_agent(
        tools, model, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False,
        callbacks=[PromptFlowCallbackHandler()]
    )
    message = HumanMessage(
        content=question
    )
    try:
        return agent.run(message)
    except Exception as e:
        # Surface agent errors as the node's output instead of failing the run.
        return str(e)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/samples.json | [
{
"question": "What is 2 to the 10th power?"
},
{
"question": "What is the sum of 2 and 2?"
}
] | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/inputs.jsonl | {"question": "What is 2 to the 10th power?"}
{"question": "What is the sum of 2 and 2?"} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/code_first_input.csv | question
What is 2 to the 10th power?
What is the sum of 2 and 2?
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/data_inputs.json | {
"data": "code_first_input.csv"
} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/flow.dag.yaml | inputs:
question:
type: string
outputs:
output:
type: string
reference: ${test_langchain_traces.output}
nodes:
- name: test_langchain_traces
type: python
source:
type: code
path: test_langchain_traces.py
inputs:
question: ${inputs.question}
conn: azure_open_ai_connection
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/long_run/long_run.py | import time
from promptflow import tool
def f1():
    # Sleep just over one minute so runners can exercise long-running-node
    # monitoring/timeout behavior.
    time.sleep(61)
    return 0
def f2():
    # Indirection layer: adds an extra frame for stack/trace inspection.
    return f1()
@tool
def long_run_func():
    """Tool entry point that blocks ~61 seconds (via f2 -> f1), then returns 0."""
    return f2()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/long_run/flow.dag.yaml | inputs: {}
outputs:
output:
type: string
reference: ${long_run_node.output}
nodes:
- name: long_run_node
type: python
inputs: {}
source:
type: code
path: long_run.py
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow/show_answer.py | from promptflow import tool
@tool
def show_answer(chat_answer: str):
    """Print the chat answer (for console/trace visibility) and return it
    unchanged so it can feed the flow's chat output."""
    print("print:", chat_answer)
    return chat_answer
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow/chat.jinja2 | system:
You are a helpful assistant.
{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}
user:
{{question}} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow/flow.dag.yaml | inputs:
chat_history:
type: list
question:
type: string
is_chat_input: true
default: What is ChatGPT?
outputs:
answer:
type: string
reference: ${show_answer.output}
is_chat_output: true
nodes:
- inputs:
deployment_name: gpt-35-turbo
max_tokens: "256"
temperature: "0.7"
chat_history: ${inputs.chat_history}
question: ${inputs.question}
name: chat_node
type: llm
source:
type: code
path: chat.jinja2
api: chat
provider: AzureOpenAI
connection: azure_open_ai_connection
- name: show_answer
type: python
source:
type: code
path: show_answer.py
inputs:
chat_answer: ${chat_node.output}
node_variants: {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/prompt_tool_with_duplicated_inputs/prompt_with_duplicated_inputs.jinja2 | {{template}} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/prompt_tool_with_duplicated_inputs/flow.dag.yaml | inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${prompt_tool_with_duplicated_inputs.output}
nodes:
- name: prompt_tool_with_duplicated_inputs
type: prompt
source:
type: code
path: prompt_with_duplicated_inputs.jinja2
inputs:
text: ${inputs.text} | 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/data.jsonl | {"chat_history":[], "question": "If I am going to run with 1.5 hours this morning, how many calories will I burn?", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
{"chat_history":[], "question": "I'm going to swim in Guangzhou city today for 30 min, how much calories will I burn?", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
{"chat_history":[], "question": "I'm going to run slowly on local street today, how much calories will I burn?", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
{"chat_history":[], "question": "If I am going to run 1.5 hours under 24 degrees Celsius, how many calories will I burn", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
{"chat_history":[], "question": "I'm going to biking for 2 hours duration today, how much calories will I burn?", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_temperature.py | import random
import time
from promptflow import tool
@tool
def get_temperature(city: str, unit: str = "c"):
    """Estimate the current temperature of a given city.

    :param city: city to get the estimated temperature for.
    :type city: str
    :param unit: the unit of the temperature, either 'c' for Celsius or 'f' for Fahrenheit.
        Defaults to Celsius ('c').
    :type unit: str

    NOTE(review): demo stub — neither ``city`` nor ``unit`` is read; the
    return value is always ``random.uniform(0, 35)``. Confirm whether unit
    conversion is intended before relying on the parameter.
    """
    # Generating a random number between 0.2 and 1 for tracing purpose
    time.sleep(random.uniform(0.2, 1))
    return random.uniform(0, 35)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/assistant_definition.yaml | model: gpt-4-1106-preview
instructions: You are a helpful assistant.
tools:
- type: code_interpreter
- type: function
source:
type: code
path: get_calorie_by_jogging.py
tool_type: python
- type: function
source:
type: code
path: get_calorie_by_swimming.py
tool_type: python
- type: function
source:
type: code
path: get_current_city.py
tool_type: python
- type: function
source:
type: code
path: get_temperature.py
tool_type: python
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_calorie_by_swimming.py | import random
import time
from promptflow import tool
@tool
def get_calorie_by_swimming(duration: float, temperature: float):
    """Estimate the calories burned by swimming based on duration and temperature.

    :param duration: the length of the swimming in hours.
    :type duration: float
    :param temperature: the environment temperature in degrees Celsius.
    :type temperature: float

    NOTE(review): demo stub — the inputs are only echoed in the log line; the
    returned value is a random integer in [100, 200].
    """
    print(
        f"Figure out the calories burned by swimming, with temperature of {temperature} degrees Celsius, "
        f"and duration of {duration} hours."
    )
    # Generating a random number between 0.2 and 1 for tracing purpose
    time.sleep(random.uniform(0.2, 1))
    return random.randint(100, 200)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/README.md | # Chat with Calorie Assistant
This sample demonstrates how the PromptFlow Assistant tool facilitates calorie calculations by considering your location, the duration of your exercise, and the type of sport. Currently, it supports two types of sports: jogging and swimming.
Tools used in this flow:
- `add_message_and_run` tool, assistant tool, provisioned with below inner functions:
- `get_current_location()`: get current city
- `get_temperature(location)`: get temperature of the city
- `get_calorie_by_jogging(duration, temperature)`: calculate calories for jogging exercise
- `get_calorie_by_swimming(duration, temperature)`: calculate calories for swimming exercise
## Prerequisites
Install promptflow sdk and other dependencies in this folder:
```sh
pip install -r requirements.txt
```
## What you will learn
In this flow, you will understand how assistant tools within PromptFlow are triggered by user prompts. The assistant tool decides which internal functions or tools to invoke based on the input provided. Your responsibility involves implementing each of these tools and registering them in the `assistant_definition`. Additionally, be aware that the tools may have dependencies on each other, affecting the order and manner of their invocation.
## Getting started
### 1. Create assistant connection (openai)
Go to "Prompt flow" "Connections" tab. Click on "Create" button, select one of assistant tool supported connection types and fill in the configurations.
Currently, only the "Open AI" connection type is supported for the assistant tool. Please refer to [OpenAI](https://platform.openai.com/) for more details.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/openai.yml --set api_key=<your_api_key>
```
Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`.
```bash
# show registered connection
pf connection show --name open_ai_connection
```
### 2. Create or get assistant/thread
Navigate to the OpenAI Assistant page and create an assistant if you haven't already. Once created, click on the 'Test' button to enter the assistant's playground. Make sure to note down the assistant_id.
**[Optional]** Start a chat session to create thread automatically. Keep track of the thread_id.
### 3. run the flow
```bash
# run chat flow with default question in flow.dag.yaml
pf flow test --flow . --interactive --multi-modal --user-agent "prompt-flow-extension/1.8.0 (win32; x64) VSCode/1.85.1"
```
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_or_create_thread.py | from openai import AsyncOpenAI
from promptflow import tool
from promptflow.connections import OpenAIConnection
@tool
async def get_or_create_thread(conn: OpenAIConnection, thread_id: str):
    """Return *thread_id* if already provided; otherwise create a fresh
    OpenAI assistant thread and return its id.

    :param conn: OpenAI connection supplying api_key and organization.
    :param thread_id: existing thread id; an empty/falsy value triggers creation.
    """
    if thread_id:
        return thread_id
    cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    thread = await cli.beta.threads.create()
    return thread.id
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows | promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_current_city.py | import random
import time
from promptflow import tool
@tool
def get_current_city():
    """Get current city.

    Demo stub: sleeps briefly so the call is visible in traces, then
    returns a randomly chosen city name.
    """
    # Generating a random number between 0.2 and 1 for tracing purpose
    time.sleep(random.uniform(0.2, 1))
    return random.choice(["Beijing", "Shanghai"])
| 0 |