repo_id
stringlengths 15
132
| file_path
stringlengths 34
176
| content
stringlengths 2
3.52M
| __index_level_0__
int64 0
0
|
---|---|---|---|
promptflow_repo/promptflow/examples | promptflow_repo/promptflow/examples/connections/azure_openai.yml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/AzureOpenAIConnection.schema.json
name: open_ai_connection
type: azure_open_ai
api_key: "<user-input>"
api_base: "aoai-api-endpoint"
api_type: "azure"
| 0 |
promptflow_repo/promptflow/examples | promptflow_repo/promptflow/examples/connections/openai.yml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
name: open_ai_connection
type: open_ai
api_key: "<user-input>"
organization: "" # optional
| 0 |
promptflow_repo/promptflow/examples | promptflow_repo/promptflow/examples/connections/requirements.txt | promptflow
promptflow-tools
python-dotenv
| 0 |
promptflow_repo/promptflow/examples | promptflow_repo/promptflow/examples/connections/cognitive_search.yml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CognitiveSearchConnection.schema.json
name: cognitive_search_connection
type: cognitive_search
api_key: "<to-be-replaced>"
api_base: "endpoint"
api_version: "2023-07-01-Preview"
| 0 |
promptflow_repo/promptflow/examples | promptflow_repo/promptflow/examples/connections/connection.ipynb | %pip install -r ../requirements.txtfrom promptflow import PFClient
# client can help manage your runs and connections.
client = PFClient()from promptflow.entities import AzureOpenAIConnection
# Initialize an AzureOpenAIConnection object
connection = AzureOpenAIConnection(
name="my_azure_open_ai_connection",
api_key="<your-api-key>",
api_base="<your-endpoint>",
)
# Create the connection, note that api_key will be scrubbed in the returned result
result = client.connections.create_or_update(connection)
print(result)from promptflow.entities import CustomConnection
# Initialize a custom connection object
connection = CustomConnection(
name="my_custom_connection",
# Secrets is a required field for custom connection
secrets={"my_key": "<your-api-key>"},
configs={"endpoint": "<your-endpoint>", "other_config": "other_value"},
)
# Create the connection, note that all secret values will be scrubbed in the returned result
result = client.connections.create_or_update(connection)
print(result)connections = client.connections.list()
for connection in connections:
print(connection)connection = client.connections.get(name="my_custom_connection")
print(connection)connection = client.connections.get(name="my_azure_open_ai_connection")
connection.api_base = "new_value"
connection.api_key = (
"<original-key>" # secrets are required again when updating connection using sdk
)
result = client.connections.create_or_update(connection)
print(connection)connection = client.connections.get(name="my_custom_connection")
connection.configs["other_config"] = "new_value"
connection.secrets[
"my_key"
] = "new_secret_value" # ValueError: Connection 'my_custom_connection' secrets ['my_key'] must be filled again when updating it.
result = client.connections.create_or_update(connection)
print(connection)# client.connections.delete(name="my_custom_connection") | 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/get-started/quickstart-azure.ipynb | %pip install -r ../../requirements.txtimport json
# Import required libraries
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
# azure version promptflow apis
from promptflow.azure import PFClienttry:
credential = DefaultAzureCredential()
# Check if given credential can get token successfully.
credential.get_token("https://management.azure.com/.default")
except Exception as ex:
# Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work
credential = InteractiveBrowserCredential()# Get a handle to workspace
pf = PFClient.from_config(credential=credential)# load flow
flow = "../../flows/standard/web-classification"
data = "../../flows/standard/web-classification/data.jsonl"# create run
base_run = pf.run(
flow=flow,
data=data,
)
print(base_run)pf.stream(base_run)details = pf.get_details(base_run)
details.head(10)pf.visualize(base_run)eval_run = pf.run(
flow="../../flows/evaluation/eval-classification-accuracy",
data=data,
run=base_run,
column_mapping={
"groundtruth": "${data.answer}",
"prediction": "${run.outputs.category}",
},
)pf.stream(eval_run)details = pf.get_details(eval_run)
details.head(10)metrics = pf.get_metrics(eval_run)
print(json.dumps(metrics, indent=4))pf.visualize([base_run, eval_run])# use the variant1 of the summarize_text_content node.
variant_run = pf.run(
flow=flow,
data=data,
variant="${summarize_text_content.variant_1}", # here we specify node "summarize_text_content" to use variant 1 version.
)pf.stream(variant_run)details = pf.get_details(variant_run)
details.head(10)eval_flow = "../../flows/evaluation/eval-classification-accuracy"
eval_run_variant = pf.run(
flow=eval_flow,
data="../../flows/standard/web-classification/data.jsonl", # path to the data file
run=variant_run, # use run as the variant
column_mapping={
# reference data
"groundtruth": "${data.answer}",
# reference the run's output
"prediction": "${run.outputs.category}",
},
)pf.stream(eval_run_variant)details = pf.get_details(eval_run_variant)
details.head(10)metrics = pf.get_metrics(eval_run_variant)
print(json.dumps(metrics, indent=4))pf.visualize([eval_run, eval_run_variant]) | 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/get-started/flow-as-function.ipynb | from promptflow import load_flow
flow_path = "../../flows/standard/web-classification"
sample_url = "https://www.youtube.com/watch?v=o5ZQyXaAv1g"
f = load_flow(source=flow_path)
result = f(url=sample_url)
print(result)# provide parameters to create connection
conn_name = "new_ai_connection"
api_key = "<user-input>"
api_base = "<user-input>"
api_version = "<user-input>"# create needed connection
import promptflow
from promptflow.entities import AzureOpenAIConnection, OpenAIConnection
# Follow https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal to create an Azure Open AI resource.
connection = AzureOpenAIConnection(
name=conn_name,
api_key=api_key,
api_base=api_base,
api_type="azure",
api_version=api_version,
)
# use this if you have an existing OpenAI account
# connection = OpenAIConnection(
# name=conn_name,
# api_key=api_key,
# )f = load_flow(
source=flow_path,
)
# directly use connection created above
f.context.connections = {"classify_with_llm": {"connection": connection}}
result = f(url=sample_url)
print(result)from promptflow.entities import FlowContext
f = load_flow(source=flow_path)
f.context = FlowContext(
# node "fetch_text_content_from_url" will take inputs from the following command instead of from flow input
overrides={"nodes.fetch_text_content_from_url.inputs.url": sample_url},
)
# the url="unknown" will not take effect
result = f(url="unknown")
print(result)f = load_flow(source="../../flows/chat/basic-chat")
f.context.streaming = True
result = f(
chat_history=[
{
"inputs": {"chat_input": "Hi"},
"outputs": {"chat_output": "Hello! How can I assist you today?"},
}
],
question="How are you?",
)
answer = ""
# the result will be a generator, iterate it to get the result
for r in result["answer"]:
answer += r
print(answer) | 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/get-started/quickstart.ipynb | %pip install -r ../../requirements.txtimport json
from promptflow import PFClient
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
# client can help manage your runs and connections.
pf = PFClient()try:
conn_name = "open_ai_connection"
conn = pf.connections.get(name=conn_name)
print("using existing connection")
except:
# Follow https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal to create an Azure Open AI resource.
connection = AzureOpenAIConnection(
name=conn_name,
api_key="<test_key>",
api_base="<test_base>",
api_type="azure",
api_version="<test_version>",
)
# use this if you have an existing OpenAI account
# connection = OpenAIConnection(
# name=conn_name,
# api_key="<user-input>",
# )
conn = pf.connections.create_or_update(connection)
print("successfully created connection")
print(conn)flow = "../../flows/standard/web-classification" # path to the flow directory# Test flow
flow_inputs = {
"url": "https://play.google.com/store/apps/details?id=com.twitter.android",
}
flow_result = pf.test(flow=flow, inputs=flow_inputs)
print(f"Flow result: {flow_result}")# Test single node in the flow
node_name = "fetch_text_content_from_url"
node_inputs = {
"url": "https://play.google.com/store/apps/details?id=com.twitter.android"
}
flow_result = pf.test(flow=flow, inputs=node_inputs, node=node_name)
print(f"Node result: {flow_result}")from promptflow import load_flow
flow_func = load_flow(flow)
flow_result = flow_func(**flow_inputs)
print(f"Flow function result: {flow_result}")data = "../../flows/standard/web-classification/data.jsonl" # path to the data file
# create run with default variant
base_run = pf.run(flow=flow, data=data, stream=True)details = pf.get_details(base_run)
details.head(10)eval_flow = "../../flows/evaluation/eval-classification-accuracy"
eval_run = pf.run(
flow=eval_flow,
data="../../flows/standard/web-classification/data.jsonl", # path to the data file
run=base_run, # specify base_run as the run you want to evaluate
column_mapping={
"groundtruth": "${data.answer}",
"prediction": "${run.outputs.category}",
}, # map the url field from the data to the url input of the flow
stream=True,
)details = pf.get_details(eval_run)
details.head(10)metrics = pf.get_metrics(eval_run)
print(json.dumps(metrics, indent=4))pf.visualize([base_run, eval_run])# use the variant1 of the summarize_text_content node.
variant_run = pf.run(
flow=flow,
data=data,
variant="${summarize_text_content.variant_1}", # here we specify node "summarize_text_content" to use variant 1 version.
stream=True,
)details = pf.get_details(variant_run)
details.head(10)eval_flow = "../../flows/evaluation/eval-classification-accuracy"
eval_run_variant = pf.run(
flow=eval_flow,
data="../../flows/standard/web-classification/data.jsonl", # path to the data file
run=variant_run, # use run as the variant
column_mapping={
"groundtruth": "${data.answer}",
"prediction": "${run.outputs.category}",
}, # map the url field from the data to the url input of the flow
stream=True,
)details = pf.get_details(eval_run_variant)
details.head(10)metrics = pf.get_metrics(eval_run_variant)
print(json.dumps(metrics, indent=4))pf.visualize([eval_run, eval_run_variant]) | 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/run-management/cloud-run-management.ipynb | %pip install -r ../../requirements.txtfrom azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
from azure.ai.ml.entities import Data
from azure.core.exceptions import ResourceNotFoundError
from promptflow.azure import PFClient
from promptflow.entities import Runtry:
credential = DefaultAzureCredential()
# Check if given credential can get token successfully.
credential.get_token("https://management.azure.com/.default")
except Exception as ex:
# Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work
credential = InteractiveBrowserCredential()# Get a handle to workspace
pf = PFClient.from_config(credential=credential)data_name, data_version = "flow_run_test_data", "1"
try:
data = pf.ml_client.data.get(name=data_name, version=data_version)
except ResourceNotFoundError:
data = Data(
name=data_name,
version=data_version,
path=f"../../flows/standard/web-classification/data.jsonl",
type="uri_file",
)
data = pf.ml_client.data.create_or_update(data)data_id = f"azureml:{data.name}:{data.version}"
print(data_id)# create run
run = Run(
# local flow file
flow="../../flows/standard/web-classification",
# remote data
data=data_id,
# to customize runtime instance type and idle time, you can provide them in resources
# resources={
# "instance_type": "STANDARD_DS11_V2",
# "idle_time_before_shutdown_minutes": 10
# }
)
base_run = pf.runs.create_or_update(
run=run,
# to reset automatic runtime to clean state, set reset_runtime to True
# reset_runtime=True,
)pf.runs.stream(base_run)run = Run(
# local flow file
flow="../../flows/standard/web-classification",
# run name
run=run,
column_mapping={
# reference another run's input data columns
"url": "${run.inputs.url}",
"answer": "${run.inputs.answer}",
"evidence": "${run.inputs.evidence}",
},
)
base_run = pf.runs.create_or_update(
run=run,
)
pf.runs.stream(base_run)run = Run(
# local flow file
flow="../../flows/standard/web-classification",
data="../../flows/standard/web-classification/data.jsonl",
# override connection for node classify_with_llm & summarize_text_content
connections={
"classify_with_llm": {"connection": "azure_open_ai_connection"},
"summarize_text_content": {"connection": "azure_open_ai_connection"},
},
)
base_run = pf.runs.create_or_update(
run=run,
)
pf.runs.stream(base_run)# create run
run = Run(
# local flow file
flow="../../flows/standard/web-classification",
# remote data
data=data_id,
)
# comment below create operation as it requires a runtime with specific name
# which will break CI pipeline
# base_run = pf.runs.create_or_update(
# run=run,
# runtime = "<runtime-name>" # TODO replace with your runtime name
# ) | 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/run-management/run-management.ipynb | %pip install -r ../../requirements.txtimport json
from promptflow import PFClient
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
# client can help manage your runs and connections.
pf = PFClient()try:
conn_name = "open_ai_connection"
conn = pf.connections.get(name=conn_name)
print("using existing connection")
except:
# Follow https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal to create an Azure Open AI resource.
connection = AzureOpenAIConnection(
name=conn_name,
api_key="<test_key>",
api_base="<test_base>",
api_type="azure",
api_version="<test_version>",
)
# use this if you have an existing OpenAI account
# connection = OpenAIConnection(
# name=conn_name,
# api_key="<user-input>",
# )
conn = pf.connections.create_or_update(connection)
print("successfully created connection")
print(conn)from promptflow._sdk._load_functions import load_run
# load a run from YAML file
base_run = load_run(
source="../../flows/standard/web-classification/run.yml",
# override the default params in the YAML file
params_override=[{"column_mapping": {"url": "${data.url}"}}],
)
# create the run
base_run = pf.runs.create_or_update(run=base_run)details = pf.get_details(base_run)
details.head(10)from promptflow.entities import Run
# directly create the run object
run = Run(
# local flow file
flow="../../flows/standard/web-classification",
# run name
run=base_run,
column_mapping={
# reference another run's inputs data column
"url": "${run.inputs.url}",
},
)
base_run = pf.runs.create_or_update(
run=run,
)
pf.runs.stream(base_run)run = Run(
# local flow file
flow="../../flows/standard/web-classification",
data="../../flows/standard/web-classification/data.jsonl",
# override connection for node classify_with_llm & summarize_text_content
# you can replace connection to your local connections
connections={
"classify_with_llm": {"connection": "open_ai_connection"},
"summarize_text_content": {"connection": "open_ai_connection"},
},
)
base_run = pf.runs.create_or_update(
run=run,
)
pf.runs.stream(base_run) | 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md | ---
resources: examples/connections/azure_openai.yml, examples/flows/chat/basic-chat, examples/flows/chat/chat-math-variant, examples/flows/evaluation/eval-chat-math
---
# Tutorial: How prompt flow helps on quality improvement
This tutorial is designed to enhance your understanding of improving flow quality through prompt tuning and evaluation.
Embark on a journey to overcome the inherent randomness of Language Models (LLMs) and enhance output reliability through **prompt fine-tuning** with this comprehensive tutorial. Explore how prompt flow can simplify this process, enabling you to swiftly build high-quality, LLM-native apps.
Prompt fine-tuning involves optimizing the input prompts given to an LLM. This strategic adjustment helps the model to focus on specific information needed for a task, thereby improving the accuracy and reliability of the LLM's responses.
When we talk about "high quality", it's not just about accuracy. It's equally important to strike a balance between the accuracy and the token cost of the LLM. Spend just 15 minutes with us to discover how prompt flow expedites the process of prompt tuning, testing, and evaluation, guiding you towards finding the ideal prompt **(accuracy ↑,token ↓)**
<img src="./media/realcase.png" alt="comparison result" width=60%>
## Video tutorial
Before practicing, you can watch the video for a quick understand. This video shows how to use the **prompt flow VS code extension** to develop your chat flow, fine tune the prompt, batch test the flow, and evaluate the quality.
<a href="http://www.youtube.com/watch?feature=player_embedded&v=gcIe6nk2gA4
" target="_blank"><img src="./media/Screenshot-video.png"
alt="video demo" border="5" /></a>
## Hands-on practice
* Option 1 - VS Code Extension: [Install the prompt flow extension](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) in VS Code and follow the [video tutorial](https://youtu.be/gcIe6nk2gA4) above for a guided practice.
* Option 2 - CLI:Follow the steps below to gain hands-on experience with the prompt flow CLI.
It's time to put theory into practice! Execute our sample and witness the effects.
### Prerequisite
Before moving ahead, ensure you've completed the [Quick Start](../../../README.md#get-started-with-prompt-flow-⚡) guidance. Ensure you have the following setup:
* [Install prompt flow](../../../README.md#installation)
* [Setup a connection for your API key](../../../README.md#quick-start-⚡)
> ℹ️ For testing quickly, this tutorial uses CLI command.
Clone the promptflow repository to your local machine:
```shell
git clone https://github.com/microsoft/promptflow.git
```
Setup sample `open_ai_connection` connection
```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Next, let's get started with customizing the flow for a specific task.
### Customize the flow for a specific task
In the `promptflow/examples/flows/chat` folder, you can see a `basic-chat` folder, which represents a chat template flow as same as the one you created in the [Quick Start](../../../README.md#get-started-with-prompt-flow-⚡) guidance. We'll use this flow as a starting point to build a math problem solver.
```bash
cd ../../flows/chat/basic-chat/
```
To enable your chatbot flow to solve math problems, you need to instruct the LLM about the task and target in the prompt. Open `chat.jinja2`, update the prompt as below:
```jinja
system:
You are an assistant to calculate the answer to the provided math problems.
Please return the final numerical answer only, without any accompanying reasoning or explanation.
{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}
user:
{{question}}
```
Before run, check your connection settings in `flow.dag.yaml` file. The default connection name is `open_ai_connection`, and the default model is `gpt-3.5-turbo`. If you have a different connection name or model, please modify the `flow.dag.yaml` file accordingly.
><details>
><summary>(click to toggle details) For example, if you use Azure Open AI, please modify the `flow.dag.yaml` file to specify your connection and deployment</summary>
>
> Replace the 'node:' section with following content, specify the 'connection_name' to your Azure Open AI connection, and specify the 'deployment_name' to the model deployment you'd like to use.
> ```yaml
>nodes:
>- name: chat
> type: llm
> source:
> type: code
> path: chat.jinja2
> inputs:
> deployment_name: <your_azure_open_ai_deployment_name> #specify your deployment name
> max_tokens: '256'
> temperature: '0'
> chat_history: ${inputs.chat_history}
> question: ${inputs.question}
> api: chat
> connection: <your_azure_open_ai_connection_name> #specify your azure openai connection name
> ```
</details>
Go back to the `promptflow/examples/flows/chat` path, run the following command to test the flow with a simple math problem:
```bash
cd ..
pf flow test --flow ./basic-chat --inputs question="1+1=?"
```
This will yield the following output:
```json
{
"answer": "2"
}
```
Sometime, the question may be challenging. Now, let's test it with a complex math problem, such as:
```bash
pf flow test --flow ./basic-chat --inputs question="We are allowed to remove exactly one integer from the list $$-1,0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,$$and then we choose two distinct integers at random from the remaining list. What number should we remove if we wish to maximize the probability that the sum of the two chosen numbers is 10?"
```
The output is:
```json
{
"answer": "-1"
}
```
However, the correct answer is 5, so the output answer is incorrect! (Don't be surprised if you got the correct answer, as the randiness of LLM. You can try multiple times for different answers.) It indicates that we need to further evaluate the performance. Therefore, in the next step, we will test the flow with more math problems to better evaluate the quality.
### Evaluate the quality of your prompt
With prompt flow, you can quickly trigger a batch-run to test your prompt with a larger dataset, and evaluate the quality of the answers.
There is a `data.jsonl` file in the `promptflow/examples/flows/chat/chat-math-variant` folder, which is a dataset containing 20 test data entries (a subset of [the Math Dataset](https://github.com/hendrycks/math/)). It includes the input question, the ground truth for numerical answer, and the reasoning (raw_answer). Here's one example:
```json
{
"question": "Determine the number of ways to arrange the letters of the word PROOF.",
"answer": "60",
"raw_answer": "There are two O's and five total letters, so the answer is $\\dfrac{5!}{2!} = \\boxed{60}$."
}
```
Run the following command to test your prompt with this dataset:
First, set the environment variable `base_run_name` to specify the run name.
```bash
base_run_name="base_run"
```
<details>
<summary>For Windows CMD users, run commnad in toggle </summary>
```shell
set base_run_name=base_run
```
</details>
>ℹ️ The default model is `gpt-turbo-3.5`, let's try `gpt-4` to see if it's smarter to get better results. Use `--connections <node_name>.connection=<connection_name>...`to specify.
```bash
pf run create --flow ./basic-chat --data ./chat-math-variant/data.jsonl --column-mapping question='${data.question}' chat_history=[] --connections chat.connection=open_ai_connection chat.model=gpt-4 --stream --name $base_run_name
```
>ℹ️ For Azure Open AI, run the following command instead:
> ```shell
> pf run create --flow ./chat_math_variant --data test_data.jsonl --column-mapping question='${data.question}' chat_history=[] --connections chat.connection=azure_open_ai_connection chat.deployment_name=gpt-4 --stream --name $base_run_name
> ```
<details>
<summary>For Windows CMD users, run commnad in toggle</summary>
```shell
pf run create --flow ./basic-chat --data ./chat-math-variant/data.jsonl --column-mapping question='${data.question}' chat_history=[] --connections chat.connection=open_ai_connection chat.model=gpt-4 --stream --name %base_run_name%
```
</details>
> ℹ️ The run name must be unique. Please specify a new name in `--name`.
> If you see "Run 'base_run' already exists.", you can specify another name. But please remember the name you specified, because you'll need it in the next step.
When it completes, you can run the following command to see the details of results:
> Specify the run name of your completed run in `--name` argument:
```bash
pf run show-details --name $base_run_name
```
<details>
<summary>For Windows CMD users, run commnad in toggle</summary>
```shell
pf run show-details --name %base_run_name%
```
</details>
This can show the line by line input and output of the run:
```
+----+---------------+-----------------+---------------+---------------+
| | inputs.chat | inputs.question | inputs.line | outputs.ans |
| | _history | | _number | wer |
+====+===============+=================+===============+===============+
| 0 | [] | Compute $\dbi | 0 | 4368 |
| | | nom{16}{5}$. | | |
+----+---------------+-----------------+---------------+---------------+
| 1 | [] | Determine the | 1 | 60 |
| | | number of | | |
| | | ways to | | |
| | | arrange the | | |
| | | letters of | | |
| | | the word | | |
| | | PROOF. | | |
+----+---------------+-----------------+---------------+---------------+
| .. | ... | ... |... | ... |
```
Next, create an **evaluation run** to calculate the accuracy of the answers based on the previous run.
In the `promptflow/examples/flows/evaluation` folder, you can see a `eval-chat-math` folder, which represents an evaluation flow. We'll use this flow to evaluate the accuracy of the answers.
```bash
cd ../evaluation
```
Run the following command to create an evaluation run:
```bash
eval_run_name="eval_run"
pf run create --flow ./eval-chat-math --data ../chat/chat-math-variant/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --stream --run $base_run_name --name $eval_run_name
```
<details>
<summary>For Windows CMD users, run commnad in toggle</summary>
```shell
set eval_run_name=eval_run
pf run create --flow ./eval-chat-math --data ../chat/chat-math-variant/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --run %base_run_name% --name %eval_run_name% --stream
```
</details>
> If needed, specify the run name which you want to evaluate in `--run` argument, and specify this evaluation run name in `--name` argument.
Then get metrics of the `eval_run`:
```bash
pf run show-metrics --name $eval_run_name
```
<details>
<summary>For Windows CMD users, run commnad in toggle</summary>
```shell
pf run show-details --name %eval_run_name%
```
</details>
You can visualize and compare the output line by line of `base_run` and `eval_run` in a web browser:
```bash
pf run visualize --name "$base_run_name,$eval_run_name"
```
<details>
<summary>For Windows CMD users, run commnad in toggle</summary>
```shell
pf run visualize --name "%base_run_name%,%eval_run_name%"
```
</details>
Because of the randomness of the LLM, the accuracy may vary. For example, in my run, the metrics are as follows:
```json
{
"accuracy": 0.35,
"error_rate": 0.65
}
```
Oops! The accuracy isn't satisfactory. It's time to fine-tune your prompt for higher quality!
### Fine-tuning your prompt and evaluate the improvement
In the `/chat` folder, you can see a `chat-math-variant` folder, which represents a flow with two additional prompt variants compared to the original one you customized based on the `basic-chat`.
In this sample flow, you'll find three Jinja files:
* `chat.jinja2` is the original prompt as same as the one you customized in `basic-chat`.
* `chat_variant_1.jinja2` and `chat_variant_2.jinja2` are the 2 additional prompt variants.
We leverage the Chain of Thought (CoT) prompt engineering method to adjust the prompt. The goal is to activate the Language Model's reasoning capability of the questions, by providing a few CoT examples.
<details>
<summary>Variant_1: 2 CoT examples</summary>
```jinja
system:
You are an assistant to calculate the answer to the provided math problems.
Please think step by step.
Return the final numerical answer only and any accompanying reasoning or explanation seperately as json format. <br>
user:
A jar contains two red marbles, three green marbles, ten white marbles and no other marbles. Two marbles are randomly drawn from this jar without replacement. What is the probability that these two marbles drawn will both be red? Express your answer as a common fraction.
assistant:
{Chain of thought: "The total number of marbles is $2+3+10=15$. The probability that the first marble drawn will be red is $2/15$. Then, there will be one red left, out of 14. Therefore, the probability of drawing out two red marbles will be: $$\\frac{2}{15}\\cdot\\frac{1}{14}=\\boxed{\\frac{1}{105}}$$.", "answer": "1/105"}
user:
Find the greatest common divisor of $7!$ and $(5!)^2.$
assistant:
{"Chain of thought": "$$ \\begin{array} 7! &=& 7 \\cdot 6 \\cdot 5 \\cdot 4 \\cdot 3 \\cdot 2 \\cdot 1 &=& 2^4 \\cdot 3^2 \\cdot 5^1 \\cdot 7^1 \\\\ (5!)^2 &=& (5 \\cdot 4 \\cdot 3 \\cdot 2 \\cdot 1)^2 &=& 2^6 \\cdot 3^2 \\cdot 5^2 \\\\ \\text{gcd}(7!, (5!)^2) &=& 2^4 \\cdot 3^2 \\cdot 5^1 &=& \\boxed{720} \\end{array} $$.", "answer": "720"}
```
</details>
<details>
<summary>Variant_2 : 6 CoT examples.</summary>
```jinja
system:
You are an assistant to calculate the answer to the provided math problems.
Please think step by step.
Return the final numerical answer only and any accompanying reasoning or explanation seperately as json format.
user:
A jar contains two red marbles, three green marbles, ten white marbles and no other marbles. Two marbles are randomly drawn from this jar without replacement. What is the probability that these two marbles drawn will both be red? Express your answer as a common fraction.
assistant:
{Chain of thought: "The total number of marbles is $2+3+10=15$. The probability that the first marble drawn will be red is $2/15$. Then, there will be one red left, out of 14. Therefore, the probability of drawing out two red marbles will be: $$\\frac{2}{15}\\cdot\\frac{1}{14}=\\boxed{\\frac{1}{105}}$$.", "answer": "1/105"}
user:
Find the greatest common divisor of $7!$ and $(5!)^2.$
assistant:
{"Chain of thought": "$$ \\begin{array} 7! &=& 7 \\cdot 6 \\cdot 5 \\cdot 4 \\cdot 3 \\cdot 2 \\cdot 1 &=& 2^4 \\cdot 3^2 \\cdot 5^1 \\cdot 7^1 \\\\ (5!)^2 &=& (5 \\cdot 4 \\cdot 3 \\cdot 2 \\cdot 1)^2 &=& 2^6 \\cdot 3^2 \\cdot 5^2 \\\\ \\text{gcd}(7!, (5!)^2) &=& 2^4 \\cdot 3^2 \\cdot 5^1 &=& \\boxed{720} \\end{array} $$.", "answer": "720"}
user:
A club has 10 members, 5 boys and 5 girls. Two of the members are chosen at random. What is the probability that they are both girls?
assistant:
{"Chain of thought": "There are $\\binomial{10}{2} = 45$ ways to choose two members of the group, and there are $\\binomial{5}{2} = 10$ ways to choose two girls. Therefore, the probability that two members chosen at random are girls is $\\dfrac{10}{45} = \\boxed{\\dfrac{2}{9}}$.", "answer": "2/9"}
user:
Allison, Brian and Noah each have a 6-sided cube. All of the faces on Allison's cube have a 5. The faces on Brian's cube are numbered 1, 2, 3, 4, 5 and 6. Three of the faces on Noah's cube have a 2 and three of the faces have a 6. All three cubes are rolled. What is the probability that Allison's roll is greater than each of Brian's and Noah's? Express your answer as a common fraction.
assistant:
{"Chain of thought": "Since Allison will always roll a 5, we must calculate the probability that both Brian and Noah roll a 4 or lower. The probability of Brian rolling a 4 or lower is $\\frac{4}{6} = \\frac{2}{3}$ since Brian has a standard die. Noah, however, has a $\\frac{3}{6} = \\frac{1}{2}$ probability of rolling a 4 or lower, since the only way he can do so is by rolling one of his 3 sides that have a 2. So, the probability of both of these independent events occurring is $\\frac{2}{3} \\cdot \\frac{1}{2} = \\boxed{\\frac{1}{3}}$.", "answer": "1/3"}
user:
Compute $\\dbinom{50}{2}$.
assistant:
{"Chain of thought": "$\\dbinom{50}{2} = \\dfrac{50!}{2!48!}=\\dfrac{50\\times 49}{2\\times 1}=\\boxed{1225}.$", "answer": "1225"}
user:
The set $S = \\{1, 2, 3, \\ldots , 49, 50\\}$ contains the first $50$ positive integers. After the multiples of 2 and the multiples of 3 are removed, how many integers remain in the set $S$?
assistant:
{"Chain of thought": "The set $S$ contains $25$ multiples of 2 (that is, even numbers). When these are removed, the set $S$ is left with only the odd integers from 1 to 49. At this point, there are $50-25=25$ integers in $S$. We still need to remove the multiples of 3 from $S$.\n\nSince $S$ only contains odd integers after the multiples of 2 are removed, we must remove the odd multiples of 3 between 1 and 49. These are 3, 9, 15, 21, 27, 33, 39, 45, of which there are 8. Therefore, the number of integers remaining in the set $S$ is $25 - 8 = \\boxed{17}$.", "answer": "17"}
```
</details>
These two jinja files are specified in the `flow.dag.yaml` file, which defines the flow structure. You can see that the `chat` node has 3 variants, which point to these 3 Jinja files.
### Test and evaluate your prompt variants
First, you need to modify your flow to add two more prompt variants into the chat node, in addition to the existed default one. In the flow.dag.yaml file, you can see 3 variants definition of the `chat` node, which point to these 3 Jinja files.
Run the CLI command below to start the experiment: test all variants, evaluate them, get the visualized comparison results of the experiment.
> ℹ️ By default, the connection is set to `open_ai_connection` and the model is set to `gpt-4` for each variant, as specified in the `flow.dag.yaml` file. However, you have the flexibility to specify a different connection and model by adding `--connections chat.connection=<your_connection_name> chat.deployment_name=<model_name>` in the test run command.
Navigate to the `promptflow/examples/flows` folder
```bash
cd ..
```
Set the environment variable `base_run_name` and `eval_run_name` to specify the run name.
```bash
base_run_name="base_run_variant_"
eval_run_name="eval_run_variant_"
```
<details>
<summary>For Windows CMD users, run command in toggle</summary>
```shell
set base_run_name=base_run_variant_
set eval_run_name=eval_run_variant_
```
</details>
Run the following command to test and evaluate the variants:
```bash
# Test and evaluate variant_0:
# Test-run
pf run create --flow ./chat/chat-math-variant --data ./chat/chat-math-variant/data.jsonl --column-mapping question='${data.question}' chat_history=[] --variant '${chat.variant_0}' --stream --name "${base_run_name}0"
# Evaluate-run
pf run create --flow ./evaluation/eval-chat-math --data ./chat/chat-math-variant/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --stream --run "${base_run_name}0" --name "${eval_run_name}0"
# Test and evaluate variant_1:
# Test-run
pf run create --flow ./chat/chat-math-variant --data ./chat/chat-math-variant/data.jsonl --column-mapping question='${data.question}' chat_history=[] --variant '${chat.variant_1}' --stream --name "${base_run_name}1"
# Evaluate-run
pf run create --flow ./evaluation/eval-chat-math --data ./chat/chat-math-variant/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --stream --run "${base_run_name}1" --name "${eval_run_name}1"
# Test and evaluate variant_2:
# Test-run
pf run create --flow ./chat/chat-math-variant --data ./chat/chat-math-variant/data.jsonl --column-mapping question='${data.question}' chat_history=[] --variant '${chat.variant_2}' --stream --name "${base_run_name}2"
# Evaluate-run
pf run create --flow ./evaluation/eval-chat-math --data ./chat/chat-math-variant/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --stream --run "${base_run_name}2" --name "${eval_run_name}2"
```
<!-- > If encounter the 'execution timeout' error, just try again. It might be caused by the LLM service congestion. -->
<details>
<summary>For Windows CMD users, run command in toggle</summary>
```shell
# Test and evaluate variant_0:
# Test-run
pf run create --flow ./chat/chat-math-variant --data ./chat/chat-math-variant/data.jsonl --column-mapping question='${data.question}' chat_history=[] --variant '${chat.variant_0}' --stream --name %base_run_name%0
# Evaluate-run
pf run create --flow ./evaluation/eval-chat-math --data ./chat/chat-math-variant/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --stream --run %base_run_name%0 --name %eval_run_name%0
# Test and evaluate variant_1:
# Test-run
pf run create --flow ./chat/chat-math-variant --data ./chat/chat-math-variant/data.jsonl --column-mapping question='${data.question}' chat_history=[] --variant '${chat.variant_1}' --stream --name %base_run_name%1
# Evaluate-run
pf run create --flow ./evaluation/eval-chat-math --data ./chat/chat-math-variant/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --stream --run %base_run_name%1 --name %eval_run_name%1
# Test and evaluate variant_2:
# Test-run
pf run create --flow ./chat/chat-math-variant --data ./chat/chat-math-variant/data.jsonl --column-mapping question='${data.question}' chat_history=[] --variant '${chat.variant_2}' --stream --name %base_run_name%2
# Evaluate-run
pf run create --flow ./evaluation/eval-chat-math --data ./chat/chat-math-variant/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --stream --run %base_run_name%2 --name %eval_run_name%2
```
</details>
Get metrics of the all evaluations:
```bash
pf run show-metrics --name "${eval_run_name}0"
pf run show-metrics --name "${eval_run_name}1"
pf run show-metrics --name "${eval_run_name}2"
```
You may get the familiar output like this:
```
# eval_variant_0_run
{
"accuracy": 0.3,
"error_rate": 0.7
}
# eval_variant_1_run
{
"accuracy": 0.9,
"error_rate": 0.1
}
# eval_variant_2_run
{
"accuracy": 0.9,
"error_rate": 0.1
}
```
Visualize the results:
```bash
pf run visualize --name "${base_run_name}0,${eval_run_name}0,${base_run_name}1,${eval_run_name}1,${base_run_name}2,${eval_run_name}2"
```
<details>
<summary>For Windows CMD users, run command in toggle</summary>
```shell
pf run visualize --name "%base_run_name%0,%eval_run_name%0,%base_run_name%1,%eval_run_name%1,%base_run_name%2,%eval_run_name%2"
```
</details>
Click the HTML link to get the experiment results. Clicking on a column in the **Output** table will allow you to view the snapshot of each line.
The snapshot of chat flow:
![Visualization chat flow](media/visualization_chat_flow.png)
The snapshot of evaluation flow:
![Visualization eval flow](media/visualization_eval_flow.png)
Excellent! Now you can compare their performances and token costs, and choose the prompt that best suits your needs. We can see that variant_1 and variant_2 have the same accuracy, but variant_1 has a lower token cost (only 2 few shots rather than the 6 in variant_2). So variant_1 is the best choice for the quality and cost balance.
### Conclusion
Great! Now you can compare their performances and token costs to choose the prompt that best suits your needs. Upon comparison, we can observe that variant_1 and variant_2 have the similar accuracy. However, variant_1 stands out as the better choice due to its lower token cost (2 few-shots vs. 6 few-shots).
<img src="media/realcase.png" alt="comparison result" width=65%/>
It is evident that adding more CoT examples in the prompt does not necessarily improve the accuracy further. Instead, we should identify the optimal point where the number of shots maximizes accuracy while minimizing cost.
Just in a few steps, we identified that variant_1 strikes the ideal balance between quality and cost! This is where the value of prompt tuning and evaluation using prompt flow becomes apparent. With prompt flow, you can easily test and evaluate different prompt variants, enabling you to facilitate high quality LLM-native apps to production.
| 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/flow-deploy/README.md | # Deploy flow as applications
This folder contains examples of how to build & deploy flow as applications like Web Application packaged in Docker format. | 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/app.spec | # -*- mode: python ; coding: utf-8 -*-
from PyInstaller.utils.hooks import collect_data_files
from PyInstaller.utils.hooks import copy_metadata
datas = [('connections', 'connections'), ('flow', 'flow'), ('settings.json', '.'), ('main.py', '.'), ('{{streamlit_runtime_interpreter_path}}', './streamlit/runtime')]
datas += collect_data_files('streamlit')
datas += copy_metadata('streamlit')
datas += collect_data_files('keyrings.alt', include_py_files=True)
datas += copy_metadata('keyrings.alt')
datas += collect_data_files('streamlit_quill')
block_cipher = None
a = Analysis(
['app.py', 'main.py'],
pathex=[],
binaries=[],
datas=datas,
hiddenimports=['bs4'],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='app',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
) | 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/app.py | import os
import sys
from promptflow._cli._pf._connection import create_connection
from streamlit.web import cli as st_cli
from streamlit.runtime import exists
from main import start
def is_yaml_file(file_path):
    """Return True if *file_path* has a YAML extension (.yaml or .yml)."""
    extension = os.path.splitext(file_path)[1]
    return extension.lower() in ('.yaml', '.yml')
def create_connections(directory_path) -> None:
    """Walk *directory_path* recursively and create a promptflow connection
    from every YAML file found under it."""
    for current_dir, _sub_dirs, file_names in os.walk(directory_path):
        for file_name in file_names:
            candidate = os.path.join(current_dir, file_name)
            if is_yaml_file(candidate):
                create_connection(candidate)
if __name__ == "__main__":
    # Recreate all exported connections bundled next to the app before serving.
    create_connections(os.path.join(os.path.dirname(__file__), "connections"))
    if exists():
        # Already inside a Streamlit runtime: launch the UI directly.
        start()
    else:
        # Otherwise re-invoke main.py through the Streamlit CLI so the
        # runtime gets initialized first.
        main_script = os.path.join(os.path.dirname(__file__), "main.py")
        sys.argv = ["streamlit", "run", main_script, "--global.developmentMode=false"]
        st_cli.main(prog_name="streamlit")
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/README.md | ---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
---
# Distribute flow as executable app
This example demos how to package flow as a executable app.
We will use [web-classification](../../../flows/standard/web-classification/README.md) as example in this tutorial.
Please ensure that you have installed all the required dependencies. You can refer to the "Prerequisites" section in the README of the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/) for a comprehensive list of prerequisites and installation instructions. And we recommend you to add a `requirements.txt` to indicate all the required dependencies for each flow.
[Pyinstaller](https://pyinstaller.org/en/stable/installation.html) is a popular tool used for converting Python applications into standalone executables. It allows you to package your Python scripts into a single executable file, which can be run on a target machine without requiring the Python interpreter to be installed.
[Streamlit](https://docs.streamlit.io/library/get-started) is an open-source Python library used for creating web applications quickly and easily. It's designed for data scientists and engineers who want to turn data scripts into shareable web apps with minimal effort.
We use Pyinstaller to package the flow and Streamlit to create custom web apps. Prior to distributing the workflow, kindly ensure that you have installed them.
In this example, we use PyInstaller version 5.13.2 and Streamlit version 1.26.0 within a Python 3.10.8 environment.
## Build a flow as executable format
Note that all dependent connections must be created before building as executable.
```bash
# create connection if not created before
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as executable format app:
```shell
pf flow build --source ../../../flows/standard/web-classification --output target --format executable
```
## Executable format folder structure
Exported files & its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files.
- connections: the folder contains yaml files to create all related connections.
- app.py: the entry file is included as the entry point for the bundled application.
- app.spec: the spec file tells PyInstaller how to process your script.
- main.py: it will start Streamlit service and be called by the entry file.
- settings.json: a json file to store the settings of the executable application.
- build: a folder contains various log and working files.
- dist: a folder contains the executable application.
- README.md: Simple introduction of the files.
### A template script of the entry file
PyInstaller reads a spec file or Python script written by you. It analyzes your code to discover every other module and library your script needs in order to execute. Then it collects copies of all those files, including the active Python interpreter, and puts them with your script in a single folder, or optionally in a single executable file.
We provide a Python entry script named `app.py` as the entry point for the bundled app, which enables you to serve a flow folder as an endpoint.
```python
import os
import sys
from promptflow._cli._pf._connection import create_connection
from streamlit.web import cli as st_cli
from streamlit.runtime import exists
from main import start
def is_yaml_file(file_path):
_, file_extension = os.path.splitext(file_path)
return file_extension.lower() in ('.yaml', '.yml')
def create_connections(directory_path) -> None:
for root, dirs, files in os.walk(directory_path):
for file in files:
file_path = os.path.join(root, file)
if is_yaml_file(file_path):
create_connection(file_path)
if __name__ == "__main__":
create_connections(os.path.join(os.path.dirname(__file__), "connections"))
if exists():
start()
else:
main_script = os.path.join(os.path.dirname(__file__), "main.py")
sys.argv = ["streamlit", "run", main_script, "--global.developmentMode=false"]
st_cli.main(prog_name="streamlit")
```
:::
### A template script of the spec file
The spec file tells PyInstaller how to process your script. It encodes the script names and most of the options you give to the pyinstaller command. The spec file is actually executable Python code. PyInstaller builds the app by executing the contents of the spec file.
To streamline this process, we offer a `app.spec` spec file that bundles the application into a single file. For additional information on spec files, you can refer to the [Using Spec Files](https://pyinstaller.org/en/stable/spec-files.html).
Please replace {{streamlit_runtime_interpreter_path}} with the path of streamlit runtime interpreter in your environment.
```spec
# -*- mode: python ; coding: utf-8 -*-
from PyInstaller.utils.hooks import collect_data_files
from PyInstaller.utils.hooks import copy_metadata
datas = [('connections', 'connections'), ('flow', 'flow'), ('settings.json', '.'), ('main.py', '.'), ('{{streamlit_runtime_interpreter_path}}', './streamlit/runtime')]
datas += collect_data_files('streamlit')
datas += copy_metadata('streamlit')
datas += collect_data_files('keyrings.alt', include_py_files=True)
datas += copy_metadata('keyrings.alt')
datas += collect_data_files('streamlit_quill')
block_cipher = None
a = Analysis(
['app.py', 'main.py'],
pathex=[],
binaries=[],
datas=datas,
hiddenimports=['bs4'],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='app',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)
```
### The bundled application using Pyinstaller
Once you've build a flow as executable format following [Build a flow as executable format](#build-a-flow-as-executable-format).
It will create two folders named `build` and `dist` within your specified output directory, denoted as <your-output-dir>. The `build` folder houses various log and working files, while the `dist` folder contains the `app` executable application.
#### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in the executable package.
Secrets in connections won't be exported directly. Instead, we will export them as a reference to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
## Test the endpoint
Finally, You can distribute the bundled application `app` to other people. They can execute your program by double clicking the executable file, e.g. `app.exe` in Windows system or running the binary file, e.g. `app` in Linux system.
The development server has a built-in web page they can use to test the flow by opening 'http://localhost:8501' in the browser. The expected result is as follows: if the flow is served successfully, the process will stay alive until it is killed manually.
To your users, the app is self-contained. They do not need to install any particular version of Python or any modules. They do not need to have Python installed at all.
**Note**: The executable generated is not cross-platform. One platform (e.g. Windows) packaged executable can't run on others (Mac, Linux).
## Known issues
1. Note that Python 3.10.0 contains a bug making it unsupportable by PyInstaller. PyInstaller will also not work with beta releases of Python 3.13.
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/main.py | import base64
import json
import os
import re
import streamlit as st
from pathlib import Path
from streamlit_quill import st_quill # noqa: F401
from bs4 import BeautifulSoup, NavigableString, Tag
from promptflow._sdk._utils import print_yellow_warning
from promptflow._sdk._serving.flow_invoker import FlowInvoker
from promptflow._utils.multimedia_utils import is_multimedia_dict, MIME_PATTERN
# Module-level cache for the FlowInvoker so the flow is loaded only once per
# process; populated lazily by run_flow() inside start().
invoker = None
def start():
    """Render the Streamlit chat UI for the web-classification flow.

    Builds the page (title, conversation history, input form), collects the
    secrets declared in settings.json into environment variables, and wires
    the Submit/Clear buttons to flow execution / chat reset.
    """

    def clear_chat() -> None:
        # Drop the rendered conversation; accumulated history is untouched.
        st.session_state.messages = []

    def show_image(image, key=None):
        # `key` is the MIME prefix (e.g. "data:image/png;base64"); when the
        # value is a bare base64 string, re-attach the prefix for st.image.
        if not image.startswith("data:image"):
            st.image(key + ',' + image)
        else:
            st.image(image)

    def json_dumps(value):
        # Best-effort JSON serialization; fall back to the raw value for
        # objects json cannot encode.
        try:
            return json.dumps(value)
        except Exception:
            return value

    def is_list_contains_rich_text(rich_text):
        # True if any (possibly nested) element is a base64 image string.
        result = False
        for item in rich_text:
            if isinstance(item, list):
                result |= is_list_contains_rich_text(item)
            elif isinstance(item, dict):
                result |= is_dict_contains_rich_text(item)
            else:
                if isinstance(item, str) and item.startswith("data:image"):
                    result = True
        return result

    def is_dict_contains_rich_text(rich_text):
        # True if any key is a MIME key or any (possibly nested) value is a
        # base64 image string.
        result = False
        for rich_text_key, rich_text_value in rich_text.items():
            if isinstance(rich_text_value, list):
                result |= is_list_contains_rich_text(rich_text_value)
            elif isinstance(rich_text_value, dict):
                result |= is_dict_contains_rich_text(rich_text_value)
            elif re.match(MIME_PATTERN, rich_text_key) or (
                    isinstance(rich_text_value, str) and rich_text_value.startswith("data:image")):
                result = True
        return result

    def render_message(role, message_items):
        # Render one chat message (a dict of flow inputs/outputs), expanding
        # nested lists/dicts and displaying embedded images inline.

        def item_render_message(value, key=None):
            if key and re.match(MIME_PATTERN, key):
                show_image(value, key)
            elif isinstance(value, str) and value.startswith("data:image"):
                show_image(value)
            else:
                if key is None:
                    st.markdown(f"`{json_dumps(value)},`")
                else:
                    st.markdown(f"`{key}: {json_dumps(value)},`")

        def list_iter_render_message(message_items):
            if is_list_contains_rich_text(message_items):
                st.markdown("`[ `")
                for item in message_items:
                    if isinstance(item, list):
                        list_iter_render_message(item)
                    # Bug fix: this branch was a separate `if`, so a nested
                    # list was rendered twice (once expanded above, then
                    # again as JSON through the `else`). `elif` keeps the
                    # branches mutually exclusive.
                    elif isinstance(item, dict):
                        dict_iter_render_message(item)
                    else:
                        item_render_message(item)
                st.markdown("`], `")
            else:
                st.markdown(f"`{json_dumps(message_items)},`")

        def dict_iter_render_message(message_items):
            if is_multimedia_dict(message_items):
                # A single {mime_prefix: base64} pair is an image payload.
                key = list(message_items.keys())[0]
                value = message_items[key]
                show_image(value, key)
            elif is_dict_contains_rich_text(message_items):
                st.markdown("`{ `")
                for key, value in message_items.items():
                    if re.match(MIME_PATTERN, key):
                        show_image(value, key)
                    else:
                        if isinstance(value, list):
                            st.markdown(f"`{key}: `")
                            list_iter_render_message(value)
                        elif isinstance(value, dict):
                            st.markdown(f"`{key}: `")
                            dict_iter_render_message(value)
                        else:
                            item_render_message(value, key)
                st.markdown("`}, `")
            else:
                st.markdown(f"`{json_dumps(message_items)},`")

        with st.chat_message(role):
            dict_iter_render_message(message_items)

    def show_conversation() -> None:
        # Initialize session containers on first render, then replay the
        # stored conversation so reruns keep the chat visible.
        if "messages" not in st.session_state:
            st.session_state.messages = []
            st.session_state.history = []
        if st.session_state.messages:
            for role, message_items in st.session_state.messages:
                render_message(role, message_items)

    def get_chat_history_from_session():
        # NOTE(review): currently unused for this flow (no chat_history
        # input); kept for flows that accept chat history.
        if "history" in st.session_state:
            return st.session_state.history
        return []

    def submit(**kwargs) -> None:
        # Record the user turn, run the flow, then record the assistant turn.
        st.session_state.messages.append(("user", kwargs))
        session_state_history = dict()
        session_state_history.update({"inputs": kwargs})
        with container:
            render_message("user", kwargs)
        # Force append chat history to kwargs
        response = run_flow(kwargs)
        st.session_state.messages.append(("assistant", response))
        session_state_history.update({"outputs": response})
        st.session_state.history.append(session_state_history)
        with container:
            render_message("assistant", response)

    def run_flow(data: dict) -> dict:
        # Lazily build one FlowInvoker for the bundled "flow" folder and
        # reuse it across submissions (see module-level `invoker`).
        global invoker
        if not invoker:
            flow = Path(__file__).parent / "flow"
            dump_path = flow.parent
            # chdir so relative paths inside the flow resolve correctly.
            if flow.is_dir():
                os.chdir(flow)
            else:
                os.chdir(flow.parent)
            invoker = FlowInvoker(flow, connection_provider="local", dump_to=dump_path)
        result, result_output = invoker.invoke(data)
        print_yellow_warning(f"Result: {result_output}")
        return result

    def extract_content(node):
        # Flatten a BeautifulSoup node into [text, {mime_prefix: base64}, ...].
        if isinstance(node, NavigableString):
            text = node.strip()
            if text:
                return [text]
        elif isinstance(node, Tag):
            if node.name == 'img':
                prefix, base64_str = node['src'].split(',', 1)
                return [{prefix: base64_str}]
            else:
                result = []
                for child in node.contents:
                    result.extend(extract_content(child))
                return result
        return []

    def parse_html_content(html_content):
        # NOTE(review): helper for rich-text (quill) inputs; unused for the
        # plain `url` input of this flow.
        soup = BeautifulSoup(html_content, 'html.parser')
        result = []
        for p in soup.find_all('p'):
            result.extend(extract_content(p))
        return result

    def parse_image_content(image_content, image_type):
        # Encode an uploaded file object as {mime_prefix: base64}; returns
        # None when no file was provided.
        if image_content is not None:
            file_contents = image_content.read()
            image_content = base64.b64encode(file_contents).decode('utf-8')
            prefix = f"data:{image_type};base64"
            return {prefix: image_content}

    st.title("web-classification APP")
    st.chat_message("assistant").write("Hello, please input following flow inputs.")
    container = st.container()
    with container:
        show_conversation()

    with st.form(key='input_form', clear_on_submit=True):
        # Prompt for every secret declared in settings.json and export it as
        # an environment variable (connections reference them via ${env:...}).
        settings_path = os.path.join(os.path.dirname(__file__), "settings.json")
        if os.path.exists(settings_path):
            with open(settings_path, "r") as file:
                json_data = json.load(file)
            environment_variables = list(json_data.keys())
            for environment_variable in environment_variables:
                secret_input = st.text_input(
                    label=environment_variable,
                    type="password",
                    placeholder=f"Please input {environment_variable} here. If you input before, you can leave it "
                                f"blank.")
                if secret_input != "":
                    os.environ[environment_variable] = secret_input

        url = st.text_input(label='url',
                            placeholder='https://play.google.com/store/apps/details?id=com.twitter.android')

        cols = st.columns(7)
        submit_bt = cols[0].form_submit_button(label='Submit')
        clear_bt = cols[1].form_submit_button(label='Clear')

    if submit_bt:
        submit(url=url)

    if clear_bt:
        clear_chat()
if __name__ == "__main__":
    # Allow launching this script directly (e.g. `streamlit run main.py`).
    start()
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/kubernetes/README.md | ---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
---
# Deploy flow using Kubernetes
This example demos how to deploy flow as a Kubernetes app.
We will use [web-classification](../../../flows/standard/web-classification/README.md) as example in this tutorial.
Please ensure that you have installed all the required dependencies. You can refer to the "Prerequisites" section in the README of the [web-classification](../../../flows/standard/web-classification/README.md#Prerequisites) for a comprehensive list of prerequisites and installation instructions.
## Build a flow as docker format
Note that all dependent connections must be created before building as docker.
```bash
# create connection if not created before
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as docker format app:
```bash
pf flow build --source ../../../flows/standard/web-classification --output dist --format docker
```
## Deploy with Kubernetes
### Build Docker image
Like other Dockerfile, you need to build the image first. You can tag the image with any name you want. In this example, we use `web-classification-serve`.
Then run the command below:
```shell
cd dist
docker build . -t web-classification-serve
```
### Create Kubernetes deployment yaml.
The Kubernetes deployment yaml file acts as a guide for managing your docker container in a Kubernetes pod. It clearly specifies important information like the container image, port configurations, environment variables, and various settings. Below, you'll find a simple deployment template that you can easily customize to meet your needs.
**Note**: You need to encode the secret using base64 first and input the <encoded_secret> as 'open-ai-connection-api-key' in the deployment configuration. For example, you can run the commands below on Linux:
```shell
encoded_secret=$(echo -n <your_api_key> | base64)
```
```yaml
---
kind: Namespace
apiVersion: v1
metadata:
name: web-classification
---
apiVersion: v1
kind: Secret
metadata:
name: open-ai-connection-api-key
namespace: web-classification
type: Opaque
data:
open-ai-connection-api-key: <encoded_secret>
---
apiVersion: v1
kind: Service
metadata:
name: web-classification-service
namespace: web-classification
spec:
type: NodePort
ports:
- name: http
port: 8080
targetPort: 8080
nodePort: 30123
selector:
app: web-classification-serve-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: web-classification-serve-app
namespace: web-classification
spec:
selector:
matchLabels:
app: web-classification-serve-app
template:
metadata:
labels:
app: web-classification-serve-app
spec:
containers:
- name: web-classification-serve-container
image: web-classification-serve
imagePullPolicy: Never
ports:
- containerPort: 8080
env:
- name: OPEN_AI_CONNECTION_API_KEY
valueFrom:
secretKeyRef:
name: open-ai-connection-api-key
key: open-ai-connection-api-key
```
### Apply the deployment.
Before you can deploy your application, ensure that you have set up a Kubernetes cluster and installed [kubectl](https://kubernetes.io/docs/reference/kubectl/) if it's not already installed. In this documentation, we will use [Minikube](https://minikube.sigs.k8s.io/docs/) as an example. To start the cluster, execute the following command:
```shell
minikube start
```
Once your Kubernetes cluster is up and running, you can proceed to deploy your application by using the following command:
```shell
kubectl apply -f deployment.yaml
```
This command will create the necessary pods to run your application within the cluster.
**Note**: You need replace <pod_name> below with your specific pod_name. You can retrieve it by running `kubectl get pods -n web-classification`.
### Retrieve flow service logs of the container
The kubectl logs command is used to retrieve the logs of a container running within a pod, which can be useful for debugging, monitoring, and troubleshooting applications deployed in a Kubernetes cluster.
```shell
kubectl -n web-classification logs <pod-name>
```
#### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in containers.
Secrets in connections won't be exported directly. Instead, we will export them as a reference to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
You'll need to set up the environment variables in the container to make the connections work.
### Test the endpoint
- Option1:
Once you've started the service, you can establish a connection between a local port and a port on the pod. This allows you to conveniently test the endpoint from your local terminal.
To achieve this, execute the following command:
```shell
kubectl port-forward <pod_name> 8080:8080 -n web-classification
```
With the port forwarding in place, you can use the curl command to initiate the endpoint test:
```shell
curl http://localhost:8080/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
- Option2:
`minikube service web-classification-service --url -n web-classification` runs as a process, creating a tunnel to the cluster. The command exposes the service directly to any program running on the host operating system.
The command above will retrieve the URL of a service running within a Minikube Kubernetes cluster (e.g. http://<ip>:<assigned_port>), which you can click to interact with the flow service in your web browser. Alternatively, you can use the following command to test the endpoint:
**Note**: Minikube will use its own external port instead of nodePort to listen to the service. So please substitute <assigned_port> with the port obtained above.
```shell
curl http://localhost:<assigned_port>/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/kubernetes/deployment.yaml | ---
kind: Namespace
apiVersion: v1
metadata:
name: web-classification
---
apiVersion: v1
kind: Secret
metadata:
name: open-ai-connection-api-key
namespace: web-classification
type: Opaque
data:
open-ai-connection-api-key: <encoded_secret>
---
apiVersion: v1
kind: Service
metadata:
name: web-classification-service
namespace: web-classification
spec:
type: NodePort
ports:
- name: http
port: 8080
targetPort: 8080
nodePort: 30123
selector:
app: web-classification-serve-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: web-classification-serve-app
namespace: web-classification
spec:
selector:
matchLabels:
app: web-classification-serve-app
template:
metadata:
labels:
app: web-classification-serve-app
spec:
containers:
- name: web-classification-serve-container
image: web-classification-serve
imagePullPolicy: Never
ports:
- containerPort: 8080
env:
- name: OPEN_AI_CONNECTION_API_KEY
valueFrom:
secretKeyRef:
name: open-ai-connection-api-key
key: open-ai-connection-api-key | 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/create-service-with-flow/README.md | ---
resources: examples/tutorials/flow-deploy/create-service-with-flow
---
# Create service with flow
This example shows how to create a simple service with flow.
You can create your own service by utilizing `flow-as-function`.
This folder contains an example of how to build a service with a flow.
Reference [here](./simple_score.py) for a minimal service example.
The output of simple_score.py will be a JSON-serialized dictionary.
You can use json parser to parse the output.
## 1. Start the service and put in background
```bash
nohup python simple_score.py &
# Note: added this to run in our CI pipeline, not needed for user.
sleep 10
```
## 2. Test the service with request
Executing the following command to send a request to execute a flow.
```bash
curl -X POST http://127.0.0.1:5000/score --header "Content-Type: application/json" --data '{"flow_input": "some_flow_input", "node_input": "some_node_input"}'
```
Sample output of the request:
```json
{
"output": {
"value": "some_flow_input"
}
}
```
Reference [here](./simple_score.py) for more.
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/create-service-with-flow/simple_score.py | import json
import logging
from flask import Flask, jsonify, request
from promptflow import load_flow
from promptflow.connections import AzureOpenAIConnection
from promptflow.entities import FlowContext
from promptflow.exceptions import SystemErrorException, UserErrorException
class SimpleScoreApp(Flask):
    """Flask application subclass for the flow-scoring service.

    Currently identical to ``Flask``; kept as an extension point for
    service-specific configuration or hooks.
    """

    pass
# Module-level application instance served by this script.
app = SimpleScoreApp(__name__)

# Thread name in the log format helps correlate concurrent request logs.
logging.basicConfig(format="%(threadName)s:%(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# load flow as a function, the function object can be shared across threads.
f = load_flow("./echo_connection_flow/")
@app.errorhandler(Exception)
def handle_error(e):
    """Translate exceptions raised during request handling into JSON responses.

    User errors map to HTTP 400 and system errors to HTTP 500; any other
    exception is formatted via promptflow's internal presenter and returned
    with its own response code plus a traceback for debugging.
    """
    for known_type, status in ((UserErrorException, 400), (SystemErrorException, 500)):
        if isinstance(e, known_type):
            body = {"message": e.message, "additional_info": e.additional_info}
            return jsonify(body), status

    # Unexpected error: format it with internal promptflow classes.
    # Note these are internal APIs and their interface may change in the future.
    from promptflow._internal import ErrorResponse, ExceptionPresenter

    presenter = ExceptionPresenter.create(e)
    response = ErrorResponse(presenter.to_dict(include_debug_info=False))
    payload = response.to_simplified_dict()
    payload.update({"trace_back": presenter.formatted_traceback})
    return jsonify(payload), response.response_code
@app.route("/health", methods=["GET"])
def health():
    """Liveness endpoint: confirm the runtime is up and responding."""
    payload = {"status": "Healthy"}
    return payload
@app.route("/score", methods=["POST"])
def score():
    """Execute the flow for one POST request and return its result as JSON.

    The JSON body's keys are passed to the flow as kwargs (e.g. ``flow_input``);
    an optional ``node_input`` key overrides the echo_connection node's input.
    Raises on invalid JSON (handled by the app-level error handler).
    """
    raw_data = request.get_data()
    logger.info(f"Start loading request data '{raw_data}'.")
    data = json.loads(raw_data)
    # create a dummy connection object
    # the connection object will only exist in memory and won't be stored in the local db.
    llm_connection = AzureOpenAIConnection(
        name="llm_connection", api_key="[determined by request]", api_base="[determined by request]"
    )
    # Configure flow contexts: a fresh context object is created for each request.
    # NOTE(review): `f` itself is shared across threads; this assumes assigning
    # f.context per request is handled thread-safely by load_flow — confirm.
    f.context = FlowContext(
        # override flow connections with connection object created above
        connections={"echo_connection": {"connection": llm_connection}},
        # override the flow nodes' inputs or other flow configs, the overrides may come from the request
        # **Note**: after this change, node "echo_connection" will take input node_input from request
        overrides={"nodes.echo_connection.inputs.node_input": data["node_input"]} if "node_input" in data else {},
    )
    # data in request will be passed to flow as kwargs
    result_dict = f(**data)
    # Note: if streaming=True is specified in the flow context, the result will be a generator;
    # see promptflow._sdk._serving.response_creator.ResponseCreator for how to handle it in an app.
    return jsonify(result_dict)
def create_app(**kwargs):
    """App factory hook (e.g. for WSGI servers); extra kwargs are accepted but unused."""
    _ = kwargs  # accepted for factory-interface compatibility, intentionally ignored
    return app
if __name__ == "__main__":
    # test this with curl -X POST http://127.0.0.1:5000/score --header "Content-Type: application/json" --data '{"flow_input": "some_flow_input", "node_input": "some_node_input"}' # noqa: E501
    # Dev server only; use a production WSGI server for real deployments.
    create_app().run(debug=True)
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy/create-service-with-flow | promptflow_repo/promptflow/examples/tutorials/flow-deploy/create-service-with-flow/echo_connection_flow/echo_connection.py | from promptflow import tool
from promptflow.connections import AzureOpenAIConnection
@tool
def echo_connection(flow_input: str, node_input: str, connection: AzureOpenAIConnection):
    """Echo tool: log its inputs and connection, then return the flow input.

    :param flow_input: value passed from the flow's inputs.
    :param node_input: value bound in flow.dag.yaml (may be overridden per request).
    :param connection: Azure OpenAI connection resolved by promptflow.
    :return: dict with the original ``flow_input`` under key "value".
    """
    print(f"Flow input: {flow_input}")
    print(f"Node input: {node_input}")
    # NOTE(review): logs the connection via private _to_dict(); verify secrets
    # are scrubbed before enabling this in production.
    print(f"Flow connection: {connection._to_dict()}")
    # Echo the flow input back to the caller unchanged.
    return {"value": flow_input}
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy/create-service-with-flow | promptflow_repo/promptflow/examples/tutorials/flow-deploy/create-service-with-flow/echo_connection_flow/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
flow_input:
type: string
outputs:
output:
type: object
reference: ${echo_connection.output}
nodes:
- name: echo_connection
type: python
source:
type: code
path: echo_connection.py
inputs:
flow_input: ${inputs.flow_input}
node_input: "dummy_node_input"
connection: open_ai_connection
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/docker/README.md | ---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
---
# Deploy a flow using Docker
This example demos how to deploy flow as a docker app.
We will use [web-classification](../../../flows/standard/web-classification/README.md) as example in this tutorial.
## Build a flow as docker format app
Note that all dependent connections must be created before building as docker.
```bash
# create connection if not created before
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as docker format app:
```bash
pf flow build --source ../../../flows/standard/web-classification --output dist --format docker
```
## Deploy with Docker
### Build Docker image
Like other Dockerfile, you need to build the image first. You can tag the image with any name you want. In this example, we use `promptflow-serve`.
Run the command below to build image:
```shell
docker build dist -t web-classification-serve
```
### Run Docker image
Running the docker image will start a service that serves the flow inside the container.
#### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in containers.
Secrets in connections won't be exported directly. Instead, we will export them as a reference to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
You'll need to set up the environment variables in the container to make the connections work.
### Run with `docker run`
You can run the docker image directly via the commands below:
```shell
# The started service will listen on port 8080. You can map it to any port on the host machine as you want.
docker run -p 8080:8080 -e OPEN_AI_CONNECTION_API_KEY=<secret-value> web-classification-serve
```
### Test the endpoint
After starting the service, you can use curl to test it:
```shell
curl http://localhost:8080/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/azure-app-service/README.md | ---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
---
# Deploy flow using Azure App Service
This example demos how to deploy a flow using Azure App Service.
[Azure App Service](https://learn.microsoft.com/azure/app-service/) is an HTTP-based service for hosting web applications, REST APIs, and mobile back ends.
The scripts (`deploy.sh` for bash and `deploy.ps1` for powershell) under this folder are here to help deploy the docker image to Azure App Service.
We will use [web-classification](../../../flows/standard/web-classification/README.md) as example in this tutorial.
## Build a flow as docker format app
Note that all dependent connections must be created before building as docker.
```bash
# create connection if not created before
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as docker format app:
```bash
pf flow build --source ../../../flows/standard/web-classification --output dist --format docker
```
## Deploy with Azure App Service
The two scripts will do the following things:
1. Create a resource group if not exists.
2. Build and push the image to docker registry.
3. Create an app service plan with the given sku.
4. Create an app with specified name, set the deployment container image to the pushed docker image.
5. Set up the environment variables for the app.
Example command to use bash script:
```shell
bash deploy.sh --path dist -i <image_tag> --name my_app_23d8m -r <docker registry> -g <resource_group>
```
Example command to use powershell script:
```powershell
.\deploy.ps1 dist -i <image_tag> -n my-app-23d8m -r <docker registry> -g <resource_group>
```
Note that the `name` will produce a unique FQDN as AppName.azurewebsites.net.
See the full parameters by `bash deploy.sh -h` or `.\deploy.ps1 -h`.
## View and test the web app
The web app can be found via [azure portal](https://portal.azure.com/)
![img](assets/azure_portal_img.png)
After the app is created, you will need to go to https://portal.azure.com/, find the app, and set up the environment variables
at (Settings>Configuration) or (Settings>Environment variables), then restart the app.
![img](assets/set_env_var.png)
Browse the app at Overview and see the test page:
![img](assets/test_page.png)
You can also test the app by sending a POST request to the app like:
```shell
curl http://<Default-domain-of-app-service>/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
Tips:
- Reach deployment logs at (Deployment>Deployment Central) and app logs at (Monitoring>Log stream).
- Reach advanced deployment tools at https://$name.scm.azurewebsites.net/.
- Reach more details about app service at https://learn.microsoft.com/azure/app-service/.
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/azure-app-service/deploy.ps1 | <#
.DESCRIPTION
Script to deploy promptflow to Azure App Service.
.PARAMETER path
The folder path to be deployed
.PARAMETER image_tag
The container image tag.
.PARAMETER registry
The container registry name, for example 'xx.azurecr.io'.
.PARAMETER name
The app name to produce a unique FQDN as AppName.azurewebsites.net.
.PARAMETER location
The app location, default to 'eastus'.
.PARAMETER sku
The app sku, default to 'F1'(free).
.PARAMETER resource_group
The app resource group.
.PARAMETER subscription
The app subscription, default using az account subscription.
.PARAMETER verbose
verbose mode.
.EXAMPLE
PS> .\deploy.ps1 -Path <folder-path> -Name my_app_23d8m -i <image_tag> -r <registry> -n <app_name> -g <resource_group>
.EXAMPLE
PS> .\deploy.ps1 -Path <folder-path> -Name my_app_23d8m -i <image_tag> -r <registry> -n <app_name> -g <resource_group> -Subscription "xxxx-xxxx-xxxx-xxxx-xxxx" -Verbose
#>
[CmdletBinding()]
param(
    # Folder containing the built flow (docker build context) to deploy.
    [string]$Path,
    # Container image tag; a timestamp version is appended when no ':' is present.
    [Alias("i", "image_tag")][string]$ImageTag,
    # Container registry, e.g. 'xx.azurecr.io' or a Docker Hub account.
    [Alias("r")][string]$Registry,
    # App name; produces the FQDN <Name>.azurewebsites.net.
    [Alias("n")][string]$Name,
    # Azure region for the App Service plan.
    [Alias("l")][string]$Location = "eastus",
    # App Service plan SKU; F1 is the free tier.
    [string]$Sku = "F1",
    # Resource group to deploy into (created if missing).
    [Alias("g", "resource_group")][string]$ResourceGroup,
    # Optional subscription id appended to all az commands.
    [string]$Subscription
)
####################### Validate args ############################
$ErrorActionPreference = "Stop"

# Print a boxed error message and exit with a non-zero status so callers
# (e.g. CI) can detect the failure. The previous plain `exit` returned 0,
# which made missing-argument failures look like success.
function Exit-WithError([string]$Message) {
    Write-Host "***************************"
    Write-Host "* Error: $Message.*"
    Write-Host "***************************"
    exit 1
}

# fail if image_tag not provided
if (!$ImageTag) {
    Exit-WithError "image_tag is required"
}

# If no explicit ':version' is present in the tag, append a timestamp-based
# version so each build pushes a distinct image tag.
if (!$ImageTag.Contains(":")) {
    $Version = "v$(Get-Date -Format 'yyyyMMdd-HHmmss')"
    $ImageTag = "${ImageTag}:${Version}"
}
Write-Host "image_tag: $ImageTag"

# fail if Registry not provided
if (!$Registry) {
    Exit-WithError "registry is required"
}

# fail if name not provided
if (!$Name) {
    Exit-WithError "name is required"
}

# fail if resource_group not provided
if (!$ResourceGroup) {
    Exit-WithError "resource_group is required"
}

# fail if path not provided
if (!$Path) {
    Exit-WithError "Path is required"
}
####################### Build and push image ############################
Write-Host "Change working directory to $Path"
cd $Path
docker build -t "$ImageTag" .

if ($Registry.Contains("azurecr.io")) {
    # Azure Container Registry: log in via az, then retag with the registry prefix.
    Write-Host "Trying to login to $Registry..."
    az acr login -n "$Registry"
    $AcrImageTag = $Registry + "/" + $ImageTag
    Write-Host "ACR image tag: $AcrImageTag"
    docker tag "$ImageTag" "$AcrImageTag"
    $ImageTag = $AcrImageTag
}
else {
    # Non-ACR registry (e.g. Docker Hub): the user must already be logged in.
    # Note: '\n' is not an escape sequence in PowerShell strings — the previous
    # version printed a literal backslash-n; Write-Host adds a newline itself.
    Write-Host "***************************************************"
    Write-Host "* WARN: Make sure you have docker account login!!!*"
    Write-Host "***************************************************"
    $DockerImageTag = $Registry + "/" + $ImageTag
    Write-Host "Docker image tag: $DockerImageTag"
    docker tag "$ImageTag" "$DockerImageTag"
    $ImageTag = $DockerImageTag
}

Write-Host "Start pushing image...$ImageTag"
docker push "$ImageTag"
####################### Create and config app ############################
# Append the shared az CLI flags (--subscription / --debug) to a command
# string, echo it for visibility, and return it to the caller.
function Append-To-Command {
    param (
        [string] $Command
    )
    if ($Subscription) {
        $Command += " --subscription $Subscription"
    }
    if ($VerbosePreference -eq "Continue") {
        $Command += " --debug"
    }
    Write-Host "$Command"
    return $Command
}
# Run a command string (after appending shared flags) and abort the script
# when it reports failure. Uses -ne 0 rather than -gt 0 so that negative
# exit codes (possible for native commands on Windows) also count as failure.
function Invoke-Expression-And-Check{
    param (
        [string]$Command
    )
    $Command=$(Append-To-Command "$Command")
    Invoke-Expression $Command
    if ($LASTEXITCODE -ne 0) {
        exit $LASTEXITCODE
    }
}
# Ensure the target resource group exists; create it when `az group exists`
# reports "false".
$Result = (az group exists --name $ResourceGroup)
if ($Result -eq "false") {
    Write-Host "Creating resource group...$ResourceGroup"
    $Command="az group create --name $ResourceGroup -l $Location"
    Invoke-Expression-And-Check "$Command"
}

# Create the App Service plan (Linux, chosen SKU) that will host the app.
$ServicePlanName = $Name + "_service_plan"
Write-Host "Creating service plan...$ServicePlanName"
$Command="az appservice plan create --name $ServicePlanName --sku $Sku --location $location --is-linux -g $ResourceGroup"
Invoke-Expression-And-Check "$Command"

# Create the web app bound to the pushed container image.
Write-Host "Creating app...$Name"
$Command="az webapp create --name $Name -p $ServicePlanName --deployment-container-image-name $ImageTag --startup-file 'bash start.sh' -g $ResourceGroup"
Invoke-Expression-And-Check "$Command"

# Apply app settings (environment variables) from settings.json plus USER_AGENT.
Write-Host "Config app...$Name"
$Command="az webapp config appsettings set -g $ResourceGroup --name $Name --settings USER_AGENT=promptflow-appservice ('@settings.json')"
Invoke-Expression-And-Check "$Command"

# Final guidance for the operator.
Write-Host "Please go to https://portal.azure.com/ to config environment variables and restart the app: $Name at (Settings>Configuration) or (Settings>Environment variables)"
Write-Host "Reach deployment logs at (Deployment>Deployment Central) and app logs at (Monitoring>Log stream)"
Write-Host "Reach advanced deployment tools at https://$Name.scm.azurewebsites.net/"
Write-Host "Reach more details about app service at https://learn.microsoft.com/en-us/azure/app-service/"
| 0 |
promptflow_repo/promptflow/examples/tutorials/flow-deploy | promptflow_repo/promptflow/examples/tutorials/flow-deploy/azure-app-service/deploy.sh | #! /bin/bash
set -e
program_name=$0
# Print CLI usage/help text. Exiting is left to the caller so that "-h" can
# exit 0 (success) while error paths choose their own code; previously the
# unconditional `exit 1` here made the help path report failure.
# Also fixed: the documented location default is 'eastus', matching the
# actual default set below, not 'centralus'.
function usage {
    echo "usage: $program_name [-i|-image_tag|--image_tag]"
    echo "  -i|-image_tag|--image_tag specify container image tag"
    echo "  -r|-registry|--registry  specify container registry name, for example 'xx.azurecr.io'"
    echo "  -n|-name|--name          specify app name to produce a unique FQDN as AppName.azurewebsites.net."
    echo "  -l|-location|--location  specify app location, default to 'eastus'"
    echo "  -sku|--sku               specify app sku, default to 'F1'(free)"
    echo "  -g|-resource_group|--resource_group specify app resource group"
    echo "  -subscription|--subscription specify app subscription, default using az account subscription"
    echo "  -v|-verbose|--verbose    specify verbose mode"
    echo "  -p|-path|--path          specify folder path to be deployed"
}
# Show help and exit successfully when the first argument asks for it.
if [ "$1" == "-help" ] || [ "$1" == "-h" ]; then
    usage
    exit 0
fi

# Defaults, overridable via the CLI flags parsed below.
location="eastus"
sku="F1"
verbose=false
####################### Parse and validate args ############################
# Each option consumes exactly the arguments it uses: value options shift 2,
# the boolean -v flag shifts 1. (Previously the loop always shifted twice,
# so the option following -v was silently swallowed.)
while [ $# -gt 0 ]; do
    case "$1" in
        -i|-image_tag|--image_tag)
            image_tag="$2"
            shift 2
            ;;
        -r|-registry|--registry)
            registry_name="$2"
            shift 2
            ;;
        -n|-name|--name)
            name="$2"
            shift 2
            ;;
        -l|-location|--location)
            location="$2"
            shift 2
            ;;
        -sku|--sku)
            sku="$2"
            shift 2
            ;;
        -g|-resource_group|--resource_group)
            resource_group="$2"
            shift 2
            ;;
        -subscription|--subscription)
            subscription="$2"
            shift 2
            ;;
        -v|-verbose|--verbose)
            verbose=true
            shift
            ;;
        -p|-path|--path)
            path="$2"
            shift 2
            ;;
        *)
            printf "***************************\n"
            printf "* Error: Invalid argument.*\n"
            printf "***************************\n"
            exit 1
    esac
done

# Report a missing required argument and fail. The previous version printed
# the error for registry/name/resource_group but kept running (no exit),
# producing confusing failures later in the script.
fail_missing() {
    printf "***************************\n"
    printf "* Error: %s is required.*\n" "$1"
    printf "***************************\n"
    exit 1
}

# fail if image_tag not provided
if [ -z "$image_tag" ]; then
    fail_missing "image_tag"
fi

# Append a timestamp version when the tag carries no explicit ':version'.
if [[ $image_tag == *":"* ]]; then
    echo "image_tag: $image_tag"
else
    version="v$(date '+%Y%m%d-%H%M%S')"
    image_tag="$image_tag:$version"
    echo "image_tag: $image_tag"
fi

# fail if registry_name not provided
if [ -z "$registry_name" ]; then
    fail_missing "registry"
fi

# fail if name not provided
if [ -z "$name" ]; then
    fail_missing "name"
fi

# fail if resource_group not provided
if [ -z "$resource_group" ]; then
    fail_missing "resource_group"
fi

# fail if path not provided
if [ -z "$path" ]; then
    fail_missing "path"
fi
####################### Build and push image ############################
echo "Change working directory to $path"
cd "$path"
docker build -t "$image_tag" .

if [[ $registry_name == *"azurecr.io" ]]; then
    # Azure Container Registry: log in via az, then retag with the registry prefix.
    echo "Trying to login to $registry_name..."
    az acr login -n "$registry_name"
    acr_image_tag=$registry_name/$image_tag
    echo "ACR image tag: $acr_image_tag"
    docker tag "$image_tag" "$acr_image_tag"
    image_tag=$acr_image_tag
else
    # Non-ACR registry (e.g. Docker Hub): the user must already be logged in.
    # (Removed a plain echo that duplicated the banner message below.)
    printf "***************************************************\n"
    printf "* WARN: Make sure you have docker account login!!!*\n"
    printf "***************************************************\n"
    docker_image_tag=$registry_name/$image_tag
    echo "Docker image tag: $docker_image_tag"
    docker tag "$image_tag" "$docker_image_tag"
    image_tag=$docker_image_tag
fi

echo "Start pushing image...$image_tag"
docker push "$image_tag"
####################### Create and config app ############################
# Append the shared az CLI flags (--subscription / --debug) to a command
# string and print the result so callers can capture it with $(...).
function append_to_command {
    local cmd="$1"
    if [ -n "$subscription" ]; then
        cmd="$cmd --subscription $subscription"
    fi
    if [ "$verbose" = true ]; then
        cmd="$cmd --debug"
    fi
    echo "$cmd"
}
# Ensure the target resource group exists; create it when `az group exists`
# reports "false".
result=$(az group exists --name "$resource_group")
if [ "$result" = "false" ]; then
    echo "Creating resource group...$resource_group"
    command="az group create --name $resource_group -l $location"
    command=$(append_to_command "$command")
    eval "$command"
fi

# Create the App Service plan (Linux, chosen SKU) that will host the app.
service_plan_name=$name"_service_plan"
echo "Creating service plan...$service_plan_name"
command="az appservice plan create --name $service_plan_name --sku $sku --location $location --is-linux -g $resource_group"
command=$(append_to_command "$command")
echo "$command"
eval "$command"

# Create the web app bound to the pushed container image.
echo "Creating app...$name"
command="az webapp create --name $name -p $service_plan_name --deployment-container-image-name $image_tag --startup-file 'bash start.sh' -g $resource_group"
command=$(append_to_command "$command")
echo "$command"
eval "$command"

# Apply app settings (environment variables) from settings.json plus USER_AGENT.
echo "Config app...$name"
command="az webapp config appsettings set -g $resource_group --name $name --settings USER_AGENT=promptflow-appservice @settings.json "
command=$(append_to_command "$command")
echo "$command"
eval "$command"

# Final guidance for the operator.
echo "Please go to https://portal.azure.com/ to config environment variables and restart the app: $name at (Settings>Configuration) or (Settings>Environment variables)"
echo "Reach deployment logs at (Deployment>Deployment Central) and app logs at (Monitoring>Log stream)"
echo "Reach advanced deployment tools at https://$name.scm.azurewebsites.net/"
echo "Reach more details about app service at https://learn.microsoft.com/en-us/azure/app-service/"
| 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/e2e-development/chat-with-pdf.md | ---
resources: examples/connections/azure_openai.yml, examples/flows/chat/chat-with-pdf
---
# Tutorial: Chat with PDF
## Overview
Retrieval Augmented Generation (or RAG) has become a prevalent pattern to build intelligent application with Large Language Models (or LLMs) since it can infuse external knowledge into the model, which is not trained with those up-to-date or proprietary information. The screenshot below shows how new Bing in Edge sidebar can answer questions based on the page content on the left - in this case, a PDF file.
![edge-chat-pdf](../../flows/chat/chat-with-pdf/assets/edge-chat-pdf.png)
Note that new Bing will also search web for more information to generate the answer, let's ignore that part for now.
In this tutorial we will try to mimic the functionality of retrieval of relevant information from the PDF to generate an answer with GPT.
**We will guide you through the following steps:**
Creating a console chatbot "chat_with_pdf" that takes a URL to a PDF file as an argument and answers questions based on the PDF's content.
Constructing a prompt flow for the chatbot, primarily reusing the code from the first step.
Creating a dataset with multiple questions to swiftly test the flow.
Evaluating the quality of the answers generated by the chat_with_pdf flow.
Incorporating these tests and evaluations into your development cycle, including unit tests and CI/CD.
Deploying the flow to Azure App Service and Streamlit to handle real user traffic.
## Prerequisite
To go through this tutorial you should:
1. Install dependencies
```bash
cd ../../flows/chat/chat-with-pdf/
pip install -r requirements.txt
```
2. Install and configure [Prompt flow for VS Code extension](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) follow [Quick Start Guide](https://microsoft.github.io/promptflow/how-to-guides/quick-start.html). (_This extension is optional but highly recommended for flow development and debugging._)
3. Deploy an OpenAI or Azure OpenAI chat model (e.g. gpt4 or gpt-35-turbo-16k), and an Embedding model (text-embedding-ada-002). Follow the [how-to](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal) for an Azure OpenAI example.
## Console chatbot chat_with_pdf
A typical RAG process consists of two steps:
- **Retrieval**: Retrieve contextual information from external systems (database, search engine, files, etc.)
- **Generation**: Construct the prompt with the retrieved context and get response from LLMs.
The retrieval step, being more of a search problem, can be quite complex. A widely used, simple yet effective approach is vector search, which requires an index building process. Suppose you have one or more documents containing the contextual information, the index building process would look something like this:
1. **Chunk**: Break down the documents into multiple chunks of text.
2. **Embedding**: Each text chunk is then processed by an embedding model to convert it into an array of floating-point numbers, also known as embedding or vector.
3. **Indexing**: These vectors are then stored in an index or a database that supports vector search. This allows for the retrieval of the top K relevant or similar vectors from the index or database.
Once the index is built, the **Retrieval** step simply involves converting the question into an embedding/vector and performing a vector search on the index to obtain the most relevant context for the question.
OK now back to the chatbot we want to build, a simplified design could be:
<img src="../../flows/chat/chat-with-pdf/assets/chat_with_pdf_simple.png" width="300" alt="chat with pdf simple design"/>
For this simple example we're using a [FAISS](https://github.com/facebookresearch/faiss) index, which can be saved as a file. A more robust or practical application, however, should consider using an external vector database with advanced management capabilities to store the vectors. With this sample's FAISS index, to avoid repeatedly downloading and rebuilding the index for the same PDF file, we add a check: if the PDF file already exists we won't download it again, and likewise for index building.
This design is quite effective for question and answering, but it may fall short when it comes to multi-turn conversations with the chatbot. Consider a scenario like this:
> $User: what is BERT?
>
> $Bot: BERT stands for Bidirectional Encoder Representations from Transformers.
>
> $User: is it better than GPT?
>
> $Bot: ...
You would typically expect the chatbot to be intelligent enough to decipher that the "it" in your second question refers to BERT, and your actual question is "is BERT better than GPT". However, if you present the question "is it better than GPT" to the embedding model and then to the vector index/database, they won't recognize that "it" represents BERT. Consequently, you won't receive the most relevant context from the index. To address this issue, we will enlist the assistance of a Large Language Model (LLM), such as GPT, to "rewrite" the question based on the previous question. The updated design is as follows:
<img src="../../flows/chat/chat-with-pdf/assets/chat_with_pdf_with_rewrite.png" width="400" alt="chat with pdf better design"/>
A "rewrite_question" step is performed before feeding the question to "find_context" step.
### Configurations
Despite being a minimalistic LLM application, there are several aspects we may want to adjust or experiment with in the future. We'll store these in environment variables for ease of access and modification. In the subsequent sections, we'll guide you on how to experiment with these configurations to enhance your chat application's quality.
Create a .env file in the inner chat_with_pdf directory (the same directory as main.py) and populate it with the following content. We can use the load_dotenv() function (from the python-dotenv package) to import these into our environment variables later on. We'll delve into what these variables represent when discussing how each step of the process is implemented.
Alternatively, rename the .env.example file in the chat_with_pdf directory and modify it per your needs.
> If you're using Open AI, your .env should look like:
```ini
OPENAI_API_KEY=<open_ai_key>
EMBEDDING_MODEL_DEPLOYMENT_NAME=<text-embedding-ada-002>
CHAT_MODEL_DEPLOYMENT_NAME=<gpt-4>
PROMPT_TOKEN_LIMIT=3000
MAX_COMPLETION_TOKENS=1024
CHUNK_SIZE=256
CHUNK_OVERLAP=64
VERBOSE=False
```
Note: if you have an org id, it can be set via OPENAI_ORG_ID=<your_org_id>
> If you're using Azure Open AI, you .env should look like:
```ini
OPENAI_API_TYPE=azure
OPENAI_API_BASE=<AOAI_endpoint>
OPENAI_API_KEY=<AOAI_key>
OPENAI_API_VERSION=2023-05-15
EMBEDDING_MODEL_DEPLOYMENT_NAME=<text-embedding-ada-002>
CHAT_MODEL_DEPLOYMENT_NAME=<gpt-4>
PROMPT_TOKEN_LIMIT=3000
MAX_COMPLETION_TOKENS=1024
CHUNK_SIZE=256
CHUNK_OVERLAP=64
VERBOSE=False
```
Note: CHAT_MODEL_DEPLOYMENT_NAME should point to a chat model like gpt-3.5-turbo or gpt-4, OPENAI_API_KEY should use the deployment key, and EMBEDDING_MODEL_DEPLOYMENT_NAME should point to a text embedding model like text-embedding-ada-002.
### Take a look at the chatbot in action!
You should be able to run the console app by:
```shell
python chat_with_pdf/main.py https://arxiv.org/pdf/1810.04805.pdf
```
> Note: https://arxiv.org/pdf/1810.04805.pdf is the paper about one of the most famous earlier LLMs: BERT.
It looks like below if everything goes fine:
![chatbot console](../../flows/chat/chat-with-pdf/assets/chatbot_console.gif)
Now, let's delve into the actual code that implements the chatbot.
### Implementation of each steps
#### Download pdf: [download.py](../../flows/chat/chat-with-pdf/chat_with_pdf/download.py)
The downloaded PDF file will be stored into a temp folder.
#### Build index: [build_index.py](../../flows/chat/chat-with-pdf/chat_with_pdf/build_index.py)
Several libraries are used in this step to build index:
1. PyPDF2 for extraction of text from the PDF file.
2. OpenAI python library for generating embeddings.
3. The FAISS library is utilized to build a vector index and save it to a file. It's important to note that an additional dictionary is used to maintain the mapping from the vector index to the actual text snippet. This is because when we later attempt to query for the most relevant context, we need to locate the text snippets, not just the embedding or vector.
The environment variables used in this step:
- OPENAI_API_* and EMBEDDING_MODEL_DEPLOYMENT_NAME: to access the Azure OpenAI embedding model
- CHUNK_SIZE and CHUNK_OVERLAP: controls how to split the PDF file into chunks for embedding
#### Rewrite question: [rewrite_question.py](../../flows/chat/chat-with-pdf/chat_with_pdf/rewrite_question.py)
This step is to use ChatGPT/GPT4 to rewrite the question to be better fit for finding relevant context from the vector index. The prompt file [rewrite_question.md](../../flows/chat/chat-with-pdf/chat_with_pdf/rewrite_question_prompt.md) should give you a better idea how it works.
#### Find context: [find_context.py](../../flows/chat/chat-with-pdf/chat_with_pdf/find_context.py)
In this step we load the FAISS index and the dict that were built in the "build index" step. We then turn the question into a vector using the same embedding function in the build index step. There is a small trick in this step to make sure the context will not exceed the token limit of model input prompt ([aoai model max request tokens](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models), OpenAI has similar limit). The output of this step is the final prompt that QnA step will send to the chat model. The PROMPT_TOKEN_LIMIT environment variable decides how big the context is.
#### QnA: [qna.py](../../flows/chat/chat-with-pdf/chat_with_pdf/qna.py)
Use OpenAI's ChatGPT or GPT4 model and ChatCompletion API to get an answer with the previous conversation history and context from PDF.
#### The main loop: [main.py](../../flows/chat/chat-with-pdf/chat_with_pdf/main.py)
This is the main entry of the chatbot, which includes a loop that reads questions from user input and subsequently calls the steps mentioned above to provide an answer.
To simplify this example, we store the downloaded file and the constructed index as local files. Although there is a mechanism in place to utilize cached files/indices, loading the index still takes a certain amount of time and contributes to a latency that users may notice. Moreover, if the chatbot is hosted on a server, it requires requests for the same PDF file to hit the same server node in order to effectively use the cache. In a real-world scenario, it's likely preferable to store the index in a centralized service or database. There're many such database available, such as [Azure Cognitive Search](https://learn.microsoft.com/en-us/azure/search/vector-search-overview), [Pinecone](https://www.pinecone.io/), [Qdrant](https://qdrant.tech/), ...
## Prompt flow: when you start considering the quality of your LLM app
Having a functioning chatbot is a great start, but it's only the beginning of the journey. Much like any application based on machine learning, the development of a high-quality LLM app usually involves a substantial amount of tuning. This could include experimenting with different prompts such as rewriting questions or QnAs, adjusting various parameters like chunk size, overlap size, or context limit, or even redesigning the workflow (for instance, deciding whether to include the rewrite_question step in our example).
Appropriate tooling is essential for facilitating this experimentation and fine-tuning process with LLM apps. This is where the concept of prompt flow comes into play. It enables you to test your LLM apps by:
- Running a few examples and manually verifying the results.
- Running larger scale tests with a formal approach (using metrics) to assess your app's quality.
You may have already learned how to create a prompt flow from scratch. Building a prompt flow from existing code is also straightforward. You can construct a chat flow either by composing the YAML file or using the visual editor of [Visual Studio Code extension](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) and create a few wrappers for existing code.
Check out below:
- [flow.dag.yaml](../../flows/chat/chat-with-pdf/flow.dag.yaml)
- [setup_env.py](../../flows/chat/chat-with-pdf/setup_env.py)
- [download_tool.py](../../flows/chat/chat-with-pdf/download_tool.py)
- [build_index_tool.py](../../flows/chat/chat-with-pdf/build_index_tool.py)
- [rewrite_question_tool.py](../../flows/chat/chat-with-pdf/rewrite_question_tool.py)
- [find_context_tool.py](../../flows/chat/chat-with-pdf/find_context_tool.py)
- [qna_tool.py](../../flows/chat/chat-with-pdf/qna_tool.py)
E.g. build_index_tool wrapper:
```python
from promptflow import tool
from chat_with_pdf.build_index import create_faiss_index
@tool
def build_index_tool(pdf_path: str) -> str:
return create_faiss_index(pdf_path)
```
The setup_env node requires some explanation: you might recall that we use environment variables to manage different configurations, including OpenAI API key in the console chatbot, in prompt flow we use [Connection](https://microsoft.github.io/promptflow/concepts/concept-connections.html) to manage access to external services like OpenAI and support passing configuration object into flow so that you can do experimentation easier. The setup_env node is to write the properties from connection and configuration object into environment variables. This allows the core code of the chatbot remain unchanged.
We're using Azure OpenAI in this example, below is the shell command to do so:
**CLI**
```bash
# create connection needed by flow
if pf connection list | grep open_ai_connection; then
echo "open_ai_connection already exists"
else
pf connection create --file ../../../connections/azure_openai.yml --name open_ai_connection --set api_key=<your_api_key> api_base=<your_api_base>
fi
```
If you plan to use OpenAI instead, you can use the following:
```shell
# create connection needed by flow
if pf connection list | grep open_ai_connection; then
echo "open_ai_connection already exists"
else
pf connection create --file ../../../connections/openai.yml --name open_ai_connection --set api_key=<your_api_key>
fi
```
The flow looks like:
<img src="../../flows/chat/chat-with-pdf/assets/multi-node-flow-chat-with-pdf.png" width="500" alt="chat with pdf flow, multi-node"/>
## Prompt flow evaluations
Now the prompt flow for chat_with_pdf is created, you might have already run/debug flow through the Visual Studio Code extension. It's time to do some testing and evaluation, which starts with:
1. Create a test dataset which contains a few question and pdf_url pairs.
2. Use existing [evaluation flows](https://github.com/microsoft/promptflow/tree/main/examples/flows/evaluation) or develop new evaluation flows to generate metrics.
A small dataset can be found here: [bert-paper-qna.jsonl](../../flows/chat/chat-with-pdf/data/bert-paper-qna.jsonl) which contains around 10 questions for the BERT paper.
Evaluations are executed through 'batch runs'. Conceptually, they are a batch run of an evaluation flow which uses the previous run as input.
Here is an example of how to create a batch run for the chat_with_pdf flow using the test dataset and manually reviewing the output. This can be done through the Visual Studio Code extension, or CLI or Python SDK.
**batch_run.yaml**
```yaml
name: chat_with_pdf_default_20230820_162219_559000
flow: .
data: ./data/bert-paper-qna.jsonl
#run: <Uncomment to select a run input>
column_mapping:
chat_history: ${data.chat_history}
pdf_url: ${data.pdf_url}
question: ${data.question}
config:
EMBEDDING_MODEL_DEPLOYMENT_NAME: text-embedding-ada-002
CHAT_MODEL_DEPLOYMENT_NAME: gpt-35-turbo
PROMPT_TOKEN_LIMIT: 3000
MAX_COMPLETION_TOKENS: 1024
VERBOSE: true
CHUNK_SIZE: 256
CHUNK_OVERLAP: 64
```
**CLI**
```bash
run_name="chat_with_pdf_"$(openssl rand -hex 12)
pf run create --file batch_run.yaml --stream --name $run_name
```
The output will include something like below:
```json
{
"name": "chat_with_pdf_default_20230820_162219_559000",
"created_on": "2023-08-20T16:23:39.608101",
"status": "Completed",
"display_name": "chat_with_pdf_default_20230820_162219_559000",
"description": null,
"tags": null,
"properties": {
"flow_path": "/Users/<user>/Work/azure-promptflow/scratchpad/chat_with_pdf",
"output_path": "/Users/<user>/.promptflow/.runs/chat_with_pdf_default_20230820_162219_559000"
},
"flow_name": "chat_with_pdf",
"data": "/Users/<user>/Work/azure-promptflow/scratchpad/chat_with_pdf/data/bert-paper-qna.jsonl",
"output": "/Users/<user>/.promptflow/.runs/chat_with_pdf_default_20230820_162219_559000/ flow_outputs/output.jsonl"
}
```
Reference [here](https://aka.ms/pf/column-mapping) for default behavior when `column-mapping` not provided in CLI.
And we developed two evaluation flows, one for "[groundedness](../../flows/evaluation/eval-groundedness/)" and one for "[perceived intelligence](../../flows/evaluation/eval-perceived-intelligence/)". These two flows use GPT models (ChatGPT or GPT4) to "grade" the answers. Reading the prompts will give you a better idea of what these two metrics are:
- [groundedness prompt](../../flows/evaluation/eval-groundedness/gpt_groundedness.md)
- [perceived intelligence prompt](../../flows/evaluation/eval-perceived-intelligence/gpt_perceived_intelligence.md)
The following example creates an evaluation flow.
**eval_run.yaml:**
```yaml
flow: ../../evaluation/eval-groundedness
run: chat_with_pdf_default_20230820_162219_559000
column_mapping:
question: ${run.inputs.question}
answer: ${run.outputs.answer}
context: ${run.outputs.context}
```
> NOTE: the run property in eval_run.yaml is the run name of batch_run.yaml
**CLI:**
```bash
eval_run_name="eval_groundedness_"$(openssl rand -hex 12)
pf run create --file eval_run.yaml --run $run_name --name $eval_run_name
```
> Note: this assumes that you have followed previous steps to create OpenAI/Azure OpenAI connection with name "open_ai_connection".
After the run completes you can use below commands to get detail of the runs:
```bash
pf run show-details --name $eval_run_name
pf run show-metrics --name $eval_run_name
pf run visualize --name $eval_run_name
```
## Experimentation!!
We have now explored how to conduct tests and evaluations for prompt flow. Additionally, we have defined two metrics to gauge the performance of our chat_with_pdf flow. By trying out various settings and configurations, running evaluations, and then comparing the metrics, we can determine the optimal configuration for production deployment.
There are several aspects we can experiment with, including but not limited to:
* Varying prompts for the rewrite_question and/or QnA steps.
* Adjusting the chunk size or chunk overlap during index building.
* Modifying the context limit.
These elements can be managed through the "config" object in the flow inputs. If you wish to experiment with the first point (varying prompts), you can add properties to the config object to control this behavior - simply by directing it to different prompt files.
Take a look at how we experiment with #3 in the test below: [test_eval in tests/chat_with_pdf_test.py](../../flows/chat/chat-with-pdf/tests/chat_with_pdf_test.py). This test will create 6 runs in total:
1. chat_with_pdf_2k_context
2. chat_with_pdf_3k_context
3. eval_groundedness_chat_with_pdf_2k_context
4. eval_perceived_intelligence_chat_with_pdf_2k_context
5. eval_groundedness_chat_with_pdf_3k_context
6. eval_perceived_intelligence_chat_with_pdf_3k_context
As you can probably tell through the names: run #3 and #4 generate metrics for run #1, run #5 and #6 generate metrics for run #2. You can compare these metrics to decide which performs better - 2K context or 3K context.
NOTE: [azure_chat_with_pdf_test](../../flows/chat/chat-with-pdf/tests/azure_chat_with_pdf_test.py) does the same tests but using Azure AI as backend, so you can see all the runs in a nice web portal with all the logs and metrics comparison etc.
Further reading:
- Learn [how to experiment with the chat-with-pdf flow](../../flows/chat/chat-with-pdf/chat-with-pdf.ipynb)
- Learn [how to experiment with the chat-with-pdf flow on Azure](../../flows/chat/chat-with-pdf/chat-with-pdf-azure.ipynb) so that you can collaborate with your team.
## Integrate prompt flow into your CI/CD workflow
It's also straightforward to integrate these into your CI/CD workflow using either CLI or SDK. In this example we have various unit tests to run tests/evaluations for chat_with_pdf flow.
Check the [test](../../flows/chat/chat-with-pdf/tests/) folder.
```bash
# run all the tests
python -m unittest discover -s tests -p '*_test.py'
```
## Deployment
The flow can be deployed across multiple platforms, such as a local development service, within a Docker container, onto a Kubernetes cluster, etc.
The following sections will guide you through the process of deploying the flow to a Docker container, for more details about
the other choices, please refer to [flow deploy docs](https://microsoft.github.io/promptflow/how-to-guides/deploy-a-flow/index.html).
### Build a flow as docker format app
Use the command below to build a flow as docker format app:
```bash
pf flow build --source . --output dist --format docker
```
### Deploy with Docker
#### Build Docker image
As with any other Dockerfile, you need to build the image first. You can tag the image with any name you want. In this example, we use `chat-with-pdf-serve`.
Run the command below to build image:
```shell
docker build dist -t chat-with-pdf-serve
```
#### Run Docker image
Run the docker image will start a service to serve the flow inside the container.
##### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in containers.
Secrets in connections won't be exported directly. Instead, we will export them as a reference to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
You'll need to set up the environment variables in the container to make the connections work.
#### Run with `docker run`
You can run the docker image directly via the commands below:
```shell
# The started service will listen on port 8080. You can map the port to any port on the host machine as you want.
docker run -p 8080:8080 -e OPEN_AI_CONNECTION_API_KEY=<secret-value> chat-with-pdf-serve
```
#### Test the endpoint
After starting the service, you can open the test page at `http://localhost:8080/` and test it:
![test-page](../../flows/chat/chat-with-pdf/assets/chat_with_pdf_test_page.png)
or use curl to test it from cli:
```shell
curl http://localhost:8080/score --data '{"question":"what is BERT?", "chat_history": [], "pdf_url": "https://arxiv.org/pdf/1810.04805.pdf", "config": {"EMBEDDING_MODEL_DEPLOYMENT_NAME": "text-embedding-ada-002", "CHAT_MODEL_DEPLOYMENT_NAME": "gpt-35-turbo", "PROMPT_TOKEN_LIMIT": 3000, "MAX_COMPLETION_TOKENS": 256, "VERBOSE": true, "CHUNK_SIZE": 1024, "CHUNK_OVERLAP": 64}}' -X POST -H "Content-Type: application/json"
```
![test-endpoint](../../flows/chat/chat-with-pdf/assets/chat_with_pdf_test_endpoint.png)
| 0 |
promptflow_repo/promptflow/examples/tutorials | promptflow_repo/promptflow/examples/tutorials/flow-in-pipeline/pipeline.ipynb | # import required libraries
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
from azure.ai.ml import MLClient, load_component, Input
from azure.ai.ml.constants import AssetTypes
from azure.ai.ml.dsl import pipelinetry:
credential = DefaultAzureCredential()
# Check if given credential can get token successfully.
credential.get_token("https://management.azure.com/.default")
except Exception as ex:
# Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work
credential = InteractiveBrowserCredential()# Get a handle to workspace
ml_client = MLClient.from_config(credential=credential)
# Retrieve an already attached Azure Machine Learning Compute.
cluster_name = "cpu-cluster"
print(ml_client.compute.get(cluster_name))flow_component = load_component("../../flows/standard/web-classification/flow.dag.yaml")data_input = Input(
path="../../flows/standard/web-classification/data.jsonl", type=AssetTypes.URI_FILE
)
@pipeline()
def pipeline_func_with_flow(data):
flow_node = flow_component(
data=data,
url="${data.url}",
connections={
"summarize_text_content": {
"connection": "azure_open_ai_connection",
"deployment_name": "gpt-35-turbo",
},
"classify_with_llm": {
"connection": "azure_open_ai_connection",
"deployment_name": "gpt-35-turbo",
},
},
)
flow_node.compute = "cpu-cluster"
# create pipeline instance
pipeline_job = pipeline_func_with_flow(data=data_input)# submit job to workspace
pipeline_job = ml_client.jobs.create_or_update(
pipeline_job, experiment_name="pipeline_samples"
)
pipeline_job# Wait until the job completes
ml_client.jobs.stream(pipeline_job.name) | 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/dynamic-list-input-tool-showcase/README.md | # Basic flow with tool using a dynamic list input
This is a flow demonstrating how to use a tool with a dynamic list input.
Tools used in this flow:
- `python` Tool
Connections used in this flow:
- None
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Run flow
- Test flow
```bash
pf flow test --flow .
```
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/dynamic-list-input-tool-showcase/requirements.txt | promptflow
my-tools-package | 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/dynamic-list-input-tool-showcase/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs: {}
outputs:
output:
type: string
reference: ${My_Tool_with_Dynamic_List_Input_cywc.output}
nodes:
- name: My_Tool_with_Dynamic_List_Input_cywc
type: python
source:
type: package
tool: my_tool_package.tools.tool_with_dynamic_list_input.my_tool
inputs:
input_prefix: hi
input_text:
- grape3
- elderberry5
endpoint_name: my_endpoint
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/data.jsonl | {"text": "Python Hello World!"}
{"text": "C Hello World!"}
{"text": "C# Hello World!"}
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/custom.yml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
name: normal_custom_connection
type: custom
configs:
api_base: test
secrets: # must-have
api_key: <to-be-replaced>
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/README.md | # Basic flow with script tool using custom strong type connection
This is a flow demonstrating the use of a script tool with a custom strong type connection which provides a secure way to manage credentials for external APIs and data sources, and it offers an improved user-friendly and intellisense experience compared to custom connections.
Tools used in this flow:
- custom `python` tool
Connections used in this flow:
- custom strong type connection
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Setup connection
Create connection if you haven't done that.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create -f custom.yml --set secrets.api_key='<your_api_key>' configs.api_base='<your_api_base>'
```
Ensure you have created `normal_custom_connection` connection.
```bash
pf connection show -n normal_custom_connection
```
## Run flow
### Run with single line input
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
# test with flow inputs
pf flow test --flow . --inputs text="Promptflow"
```
### Run with multiple lines data
- create run
```bash
pf run create --flow . --data ./data.jsonl --stream
```
- list and show run meta
```bash
# list created run
pf run list -r 3
# get a sample run name
name=$(pf run list -r 10 | jq '.[] | select(.name | contains("custom_strong_type")) | .name'| head -n 1 | tr -d '"')
# show specific run detail
pf run show --name $name
# show output
pf run show-details --name $name
# visualize run in browser
pf run visualize --name $name
```
### Run with connection override
Run flow with newly created connection.
```bash
pf run create --flow . --data ./data.jsonl --connections my_script_tool.connection=normal_custom_connection --stream
```
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/my_script_tool.py | from promptflow import tool
from promptflow.connections import CustomStrongTypeConnection
from promptflow.contracts.types import Secret
class MyCustomConnection(CustomStrongTypeConnection):
"""My custom strong type connection.
:param api_key: The api key.
:type api_key: Secret
:param api_base: The api base.
:type api_base: String
"""
api_key: Secret
api_base: str = "This is a fake api base."
@tool
def my_tool(connection: MyCustomConnection, input_text: str) -> str:
# Replace with your tool code.
# Use custom strong type connection like: connection.api_key, connection.api_base
return "Hello " + input_text
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/requirements.txt | promptflow[azure]==1.1.0
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
text:
type: string
default: Microsoft
outputs:
my_output:
type: string
reference: ${my_script_tool.output}
nodes:
- name: my_script_tool
type: python
source:
type: code
path: my_script_tool.py
inputs:
connection: normal_custom_connection
input_text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom_llm_tool_showcase/custom_connection.yml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
name: basic_custom_connection
type: custom
configs:
api_base: <to-be-replaced>
secrets: # must-have
api_key: <to-be-replaced>
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom_llm_tool_showcase/README.md | # Flow with custom_llm tool
This is a flow demonstrating how to use a `custom_llm` tool, which enables users to seamlessly connect to a large language model with prompt tuning experience using a `PromptTemplate`.
Tools used in this flow:
- `custom_llm` Tool
Connections used in this flow:
- custom connection
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Setup connection
Create connection if you haven't done that.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create -f custom_connection.yml --set secrets.api_key=<your_api_key> configs.api_base=<your_api_base>
```
Ensure you have created `basic_custom_connection` connection.
```bash
pf connection show -n basic_custom_connection
```
## Run flow
- Test flow
```bash
pf flow test --flow .
```
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom_llm_tool_showcase/requirements.txt | promptflow
my-tools-package | 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom_llm_tool_showcase/prompt_template.jinja2 | Welcome to {{ website_name }}!
{% if user_name %}
Hello, {{ user_name }}!
{% else %}
Hello there!
{% endif %} | 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom_llm_tool_showcase/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
website_name:
type: string
default: Microsoft
user_name:
type: string
default: ""
outputs:
output:
type: string
reference: ${my_custom_llm_tool.output}
nodes:
- name: my_custom_llm_tool
type: custom_llm
source:
type: package_with_prompt
tool: my_tool_package.tools.tool_with_custom_llm_type.my_tool
path: prompt_template.jinja2
inputs:
connection: basic_custom_connection
website_name: ${inputs.website_name}
user_name: ${inputs.user_name}
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-package-tool-showcase/data.jsonl | {"text": "Python Hello World!"}
{"text": "C Hello World!"}
{"text": "C# Hello World!"} | 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-package-tool-showcase/README.md | # Basic flow with package tool using custom strong type connection
This is a flow demonstrating the use of a package tool with a custom strong type connection which provides a secure way to manage credentials for external APIs and data sources, and it offers an improved user-friendly and intellisense experience compared to custom connections.
Tools used in this flow:
- custom package tool
Connections used in this flow:
- custom strong type connection
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Setup connection
Create connection if you haven't done that.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create -f my_custom_connection.yml --set secrets.api_key='<your_api_key>' configs.api_base='<your_api_base>'
```
Ensure you have created `my_custom_connection` connection.
```bash
pf connection show -n my_custom_connection
```
## Run flow
### Run with single line input
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
# test with flow inputs
pf flow test --flow . --inputs text="Promptflow"
```
### Run with multiple lines data
- create run
```bash
pf run create --flow . --data ./data.jsonl --stream
```
- list and show run meta
```bash
# list created run
pf run list -r 3
# get a sample run name
name=$(pf run list -r 10 | jq '.[] | select(.name | contains("custom_strong_type")) | .name'| head -n 1 | tr -d '"')
# show specific run detail
pf run show --name $name
# show output
pf run show-details --name $name
# visualize run in browser
pf run visualize --name $name
```
### Run with connection override
Run flow with newly created connection.
```bash
pf run create --flow . --data ./data.jsonl --connections my_package_tool.connection=my_custom_connection --stream
```
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-package-tool-showcase/my_custom_connection.yml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomStrongTypeConnection.schema.json
name: "my_custom_connection"
type: custom
custom_type: MyCustomConnection
module: my_tool_package.tools.tool_with_custom_strong_type_connection
package: my-tools-package
package_version: 0.0.5
configs:
api_base: "This is a fake api base." # String type. The api base.
secrets: # must-have
api_key: "to_replace_with_api_key" # Secret type. The api key get from "https://xxx.com".
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-package-tool-showcase/requirements.txt | promptflow[azure]==1.1.0
my-tools-package==0.0.5
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/custom-strong-type-connection-package-tool-showcase/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
text:
type: string
default: Microsoft
outputs:
my_output:
type: string
reference: ${my_package_tool.output}
nodes:
- name: my_package_tool
type: python
source:
type: package
tool: my_tool_package.tools.tool_with_custom_strong_type_connection.my_tool
inputs:
connection: my_custom_connection
input_text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/cascading-inputs-tool-showcase/README.md | # Basic flow with package tool using cascading inputs
This is a flow demonstrating the use of a tool with cascading inputs, which is frequently used in situations where the selection in one input field determines what subsequent inputs should be shown,
and it helps in creating a more efficient, user-friendly, and error-free input process.
Tools used in this flow:
- `python` Tool
Connections used in this flow:
- None
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Run flow
- Test flow
```bash
pf flow test --flow .
``` | 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/cascading-inputs-tool-showcase/requirements.txt | promptflow
my-tools-package==0.0.7 | 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/cascading-inputs-tool-showcase/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
python_requirements_txt: requirements.txt
inputs: {}
outputs:
user_id:
type: string
reference: ${Tool_with_Cascading_Inputs.output}
nodes:
- name: Tool_with_Cascading_Inputs
type: python
source:
type: package
tool: my_tool_package.tools.tool_with_cascading_inputs.my_tool
inputs:
user_type: student
student_id: "student_id"
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/filepath-input-tool-showcase/hello_method.py | def hello(input_text: str) -> str:
# Replace with your own code.
return "Hello " + input_text
| 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/filepath-input-tool-showcase/requirements.txt | promptflow
promptflow-tools
my-tools-package | 0 |
promptflow_repo/promptflow/examples/tools/use-cases | promptflow_repo/promptflow/examples/tools/use-cases/filepath-input-tool-showcase/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
input:
type: string
default: Microsoft
outputs:
output:
type: string
reference: ${Tool_with_FilePath_Input.output}
nodes:
- name: Tool_with_FilePath_Input
type: python
source:
type: package
tool: my_tool_package.tools.tool_with_file_path_input.my_tool
inputs:
input_text: ${inputs.input}
input_file: hello_method.py
| 0 |
promptflow_repo/promptflow/examples/tools | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/setup.py | from setuptools import find_packages, setup
PACKAGE_NAME = "my-tools-package"
setup(
name=PACKAGE_NAME,
version="0.0.12",
description="This is my tools package",
packages=find_packages(),
entry_points={
"package_tools": ["my_tools = my_tool_package.tools.utils:list_package_tools"],
},
include_package_data=True, # This line tells setuptools to include files from MANIFEST.in
extras_require={
"azure": [
"azure-ai-ml>=1.11.0,<2.0.0"
]
},
)
| 0 |
promptflow_repo/promptflow/examples/tools | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/MANIFEST.in | include my_tool_package/yamls/*.yaml | 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/__init__.py | __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_generated_by_input.py | from typing import Union
from promptflow import tool
from typing import Dict, List
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection, CognitiveSearchConnection
def generate_index_json(
index_type: str,
index: str = "",
index_connection: CognitiveSearchConnection = "",
index_name: str = "",
content_field: str = "",
embedding_field: str = "",
metadata_field: str = "",
semantic_configuration: str = "",
embedding_connection: Union[AzureOpenAIConnection, OpenAIConnection] = "",
embedding_deployment: str = ""
) -> str:
"""This is a dummy function to generate a index json based on the inputs.
"""
import json
inputs = ""
if index_type == "Azure Cognitive Search":
# 1. Call to create a new index
# 2. Call to get the index yaml and return as a json
inputs = {
"index_type": index_type,
"index": "retrieved_index",
"index_connection": index_connection,
"index_name": index_name,
"content_field": content_field,
"embedding_field": embedding_field,
"metadata_field": metadata_field,
"semantic_configuration": semantic_configuration,
"embedding_connection": embedding_connection,
"embedding_deployment": embedding_deployment
}
elif index_type == "Workspace MLIndex":
# Call to get the index yaml and return as a json
inputs = {
"index_type": index_type,
"index": index,
"index_connection": "retrieved_index_connection",
"index_name": "retrieved_index_name",
"content_field": "retrieved_content_field",
"embedding_field": "retrieved_embedding_field",
"metadata_field": "retrieved_metadata_field",
"semantic_configuration": "retrieved_semantic_configuration",
"embedding_connection": "retrieved_embedding_connection",
"embedding_deployment": "retrieved_embedding_deployment"
}
result = json.dumps(inputs)
return result
def reverse_generate_index_json(index_json: str) -> Dict:
"""This is a dummy function to generate origin inputs from index_json.
"""
import json
# Calculate the UI inputs based on the index_json
result = json.loads(index_json)
return result
def list_index_types(subscription_id, resource_group_name, workspace_name) -> List[str]:
return [
{"value": "Azure Cognitive Search"},
{"value": "PineCone"},
{"value": "FAISS"},
{"value": "Workspace MLIndex"},
{"value": "MLIndex from path"}
]
def list_indexes(
subscription_id,
resource_group_name,
workspace_name
) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
import random
words = ["apple", "banana", "cherry", "date", "elderberry", "fig", "grape", "honeydew", "kiwi", "lemon"]
result = []
for i in range(10):
random_word = f"{random.choice(words)}{i}"
cur_item = {
"value": random_word,
"display_value": f"index_{random_word}",
"hyperlink": f'https://www.bing.com/search?q={random_word}',
"description": f"this is {i} item",
}
result.append(cur_item)
return result
def list_fields(subscription_id, resource_group_name, workspace_name) -> List[str]:
return [
{"value": "id"},
{"value": "content"},
{"value": "catelog"},
{"value": "sourcepage"},
{"value": "sourcefile"},
{"value": "title"},
{"value": "content_hash"},
{"value": "meta_json_string"},
{"value": "content_vector_open_ai"}
]
def list_semantic_configuration(subscription_id, resource_group_name, workspace_name) -> List[str]:
return [{"value": "azureml-default"}]
def list_embedding_deployment(embedding_connection: str) -> List[str]:
return [{"value": "text-embedding-ada-002"}, {"value": "ada-1k-tpm"}]
@tool
def my_tool(index_json: str, queries: str, top_k: int) -> str:
return f"Hello {index_json}"
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_file_path_input.py | import importlib
from pathlib import Path
from promptflow import tool
from promptflow.contracts.types import FilePath
@tool
def my_tool(input_file: FilePath, input_text: str) -> str:
# customise your own code to handle and use the input_file here
new_module = importlib.import_module(Path(input_file).stem)
return new_module.hello(input_text)
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/my_tool_2.py | from promptflow import ToolProvider, tool
from promptflow.connections import CustomConnection
class MyTool(ToolProvider):
    """Tool provider that greets the given text.

    A provider class is useful when several tool methods share setup such as
    a connection; here the connection is stored but not used.
    """

    def __init__(self, connection: CustomConnection):
        super().__init__()
        # Keep the connection so tool methods could read API configs
        # (e.g. connection.api_key, connection.api_base) if needed.
        self.connection = connection

    @tool
    def my_tool(self, input_text: str) -> str:
        # Not every tool needs a connection; this demo only builds a greeting.
        return "Hello " + input_text
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/utils.py | from pathlib import Path
from ruamel.yaml import YAML
def collect_tools_from_directory(base_dir) -> dict:
    """Collect tool definitions from every ``*.yaml`` file under *base_dir*.

    Each YAML file maps one or more tool identifiers to their specs; when the
    same identifier appears in several files, the last one loaded wins.

    :param base_dir: directory searched recursively for YAML specs.
    :return: mapping of tool identifier -> tool spec dict.
    """
    tools = {}
    yaml = YAML()
    for yaml_path in Path(base_dir).glob("**/*.yaml"):
        # Use a distinct name for the handle (the original shadowed the loop
        # variable) and read YAML as UTF-8 regardless of platform default.
        with open(yaml_path, "r", encoding="utf-8") as handle:
            tools_in_file = yaml.load(handle)
        for identifier, tool in tools_in_file.items():
            tools[identifier] = tool
    return tools
def list_package_tools():
    """Return all package tools, loaded from this package's YAML specs."""
    # Tool YAML specs live in the sibling "yamls" directory of the package.
    yamls_dir = Path(__file__).parents[1] / "yamls"
    return collect_tools_from_directory(yamls_dir)
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/__init__.py | __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_strong_type_connection.py | from promptflow import tool
from promptflow.connections import CustomStrongTypeConnection
from promptflow.contracts.types import Secret
class MyCustomConnection(CustomStrongTypeConnection):
    """My custom strong type connection.

    :param api_key: The api key obtained from "https://xxx.com".
    :type api_key: Secret
    :param api_base: The api base.
    :type api_base: String
    """
    # Declared as Secret so the value is scrubbed in logs and returned results.
    api_key: Secret
    # Non-secret config with a placeholder default.
    api_base: str = "This is a fake api base."
@tool
def my_tool(connection: MyCustomConnection, input_text: str) -> str:
    """Return a greeting for *input_text*.

    The strong-typed connection would expose configs via attributes
    (connection.api_key, connection.api_base); this demo does not use them.
    """
    greeting = "Hello " + input_text
    return greeting
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_dynamic_list_input.py | from promptflow import tool
from typing import List, Union, Dict
def my_list_func(prefix: str = "", size: int = 10, **kwargs) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
    """This is a dummy function to generate a list of items.

    :param prefix: prefix to add to each item.
    :param size: number of items to generate.
    :param kwargs: other parameters.
    :return: a list of items. Each item is a dict with the following keys:
        - value: for backend use. Required.
        - display_value: for UI display. Optional.
        - hyperlink: external link. Optional.
        - description: information icon tip. Optional.
    """
    import random

    word_pool = ["apple", "banana", "cherry", "date", "elderberry", "fig", "grape", "honeydew", "kiwi", "lemon"]
    # Pick one random word per slot, suffixed with its position for uniqueness.
    picks = (f"{random.choice(word_pool)}{index}" for index in range(size))
    return [
        {
            "value": word,
            "display_value": f"{prefix}_{word}",
            "hyperlink": f"https://www.bing.com/search?q={word}",
            "description": f"this is {index} item",
        }
        for index, word in enumerate(picks)
    ]
def list_endpoint_names(subscription_id, resource_group_name, workspace_name, prefix: str = "") -> List[Dict[str, str]]:
    """This is an example to show how to get Azure ML resource in tool input list function.

    :param subscription_id: Azure subscription id.
    :param resource_group_name: Azure resource group name.
    :param workspace_name: Azure ML workspace name.
    :param prefix: prefix to add to each item.
    """
    from azure.ai.ml import MLClient
    from azure.identity import DefaultAzureCredential

    credential = DefaultAzureCredential()
    # Fail fast with a clear auth error before listing resources.
    credential.get_token("https://management.azure.com/.default")

    ml_client = MLClient(
        credential=credential,
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name)

    endpoints = []
    for endpoint in ml_client.online_endpoints.list():
        detail_url = (
            f"https://ml.azure.com/endpoints/realtime/{endpoint.name}/detail?wsid=/subscriptions/"
            f"{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft."
            f"MachineLearningServices/workspaces/{workspace_name}"
        )
        endpoints.append(
            {
                "value": endpoint.name,
                "display_value": f"{prefix}_{endpoint.name}",
                # external link to jump to the endpoint page.
                "hyperlink": detail_url,
                "description": f"this is endpoint: {endpoint.name}",
            }
        )
    return endpoints
@tool
def my_tool(input_prefix: str, input_text: list, endpoint_name: str) -> str:
    """Combine the selected inputs into a single greeting string."""
    joined_words = ",".join(input_text)
    return " ".join(["Hello", input_prefix, joined_words, endpoint_name])
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_llm_type.py | from jinja2 import Template
from promptflow import tool
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
@tool
def my_tool(connection: CustomConnection, prompt: PromptTemplate, **kwargs) -> str:
    """Render *prompt* as a Jinja2 template using **kwargs as variables.

    The connection usually holds configs to reach an API; it is unused in
    this demo and may be removed if not needed.
    """
    template = Template(prompt, trim_blocks=True, keep_trailing_newline=True)
    return template.render(**kwargs)
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/my_tool_1.py | from promptflow import tool
from promptflow.connections import CustomConnection
@tool
def my_tool(connection: CustomConnection, input_text: str) -> str:
    """Return a greeting for *input_text*.

    The connection usually carries API configs (connection.api_key,
    connection.api_base); this demo does not need one.
    """
    return "Hello {}".format(input_text)
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_cascading_inputs.py | from enum import Enum
from promptflow import tool
class UserType(str, Enum):
    # The str mixin makes members compare equal to their raw string values,
    # so callers may pass plain strings such as "student".
    STUDENT = "student"
    TEACHER = "teacher"
@tool
def my_tool(user_type: UserType, student_id: str = "", teacher_id: str = "") -> str:
    """This is a dummy function to support cascading inputs.

    :param user_type: user type, student or teacher.
    :param student_id: student id.
    :param teacher_id: teacher id.
    :return: id of the user.
        If user_type is student, return student_id.
        If user_type is teacher, return teacher_id.
    :raises ValueError: if user_type is neither student nor teacher.
    """
    if user_type == UserType.STUDENT:
        return student_id
    elif user_type == UserType.TEACHER:
        return teacher_id
    else:
        # Include the offending value so misconfigured inputs are easy to debug.
        # ValueError subclasses Exception, so existing handlers still match.
        raise ValueError(f"Invalid user type: {user_type!r}.")
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_custom_llm_type.yaml | my_tool_package.tools.tool_with_custom_llm_type.my_tool:
name: My Custom LLM Tool
description: This is a tool to demonstrate how to customize an LLM tool with a PromptTemplate.
type: custom_llm
module: my_tool_package.tools.tool_with_custom_llm_type
function: my_tool
inputs:
connection:
type:
- CustomConnection
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_generated_by_input.yaml | my_tool_package.tools.tool_with_generated_by_input.my_tool:
function: my_tool
inputs:
index_json:
type:
- string
generated_by:
func_path: my_tool_package.tools.tool_with_generated_by_input.generate_index_json
func_kwargs:
- name: index_type
type:
- string
reference: ${inputs.index_type}
- name: index
type:
- string
optional: true
reference: ${inputs.index}
- name: index_connection
type: [CognitiveSearchConnection]
optional: true
reference: ${inputs.index_connection}
- name: index_name
type:
- string
optional: true
reference: ${inputs.index_name}
- name: content_field
type:
- string
optional: true
reference: ${inputs.content_field}
- name: embedding_field
type:
- string
optional: true
reference: ${inputs.embedding_field}
- name: metadata_field
type:
- string
optional: true
reference: ${inputs.metadata_field}
- name: semantic_configuration
type:
- string
optional: true
reference: ${inputs.semantic_configuration}
- name: embedding_connection
type: [AzureOpenAIConnection, OpenAIConnection]
optional: true
reference: ${inputs.embedding_connection}
- name: embedding_deployment
type:
- string
optional: true
reference: ${inputs.embedding_deployment}
reverse_func_path: my_tool_package.tools.tool_with_generated_by_input.reverse_generate_index_json
queries:
type:
- string
top_k:
type:
- int
index_type:
type:
- string
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_index_types
input_type: uionly_hidden
index:
type:
- string
enabled_by: index_type
enabled_by_value: ["Workspace MLIndex"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_indexes
input_type: uionly_hidden
index_connection:
type: [CognitiveSearchConnection]
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
input_type: uionly_hidden
index_name:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
input_type: uionly_hidden
content_field:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_fields
input_type: uionly_hidden
embedding_field:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_fields
input_type: uionly_hidden
metadata_field:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_fields
input_type: uionly_hidden
semantic_configuration:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_semantic_configuration
input_type: uionly_hidden
embedding_connection:
type: [AzureOpenAIConnection, OpenAIConnection]
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
input_type: uionly_hidden
embedding_deployment:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_embedding_deployment
func_kwargs:
- name: embedding_connection
type:
- string
reference: ${inputs.embedding_connection}
input_type: uionly_hidden
module: my_tool_package.tools.tool_with_generated_by_input
name: Tool with Generated By Input
description: This is a tool with generated by input
type: python
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_dynamic_list_input.yaml | my_tool_package.tools.tool_with_dynamic_list_input.my_tool:
function: my_tool
inputs:
input_prefix:
type:
- string
input_text:
type:
- list
dynamic_list:
func_path: my_tool_package.tools.tool_with_dynamic_list_input.my_list_func
func_kwargs:
- name: prefix # argument name to be passed to the function
type:
- string
# if optional is not specified, default to false.
      # this is for UX pre-validation. If optional is false but no input is provided, the UX can raise an error in advance.
optional: true
reference: ${inputs.input_prefix} # dynamic reference to another input parameter
- name: size # another argument name to be passed to the function
type:
- int
optional: true
default: 10
# enum and dynamic list may need below setting.
# allow user to enter input value manually, default false.
allow_manual_entry: true
# allow user to select multiple values, default false.
is_multi_select: true
endpoint_name:
type:
- string
dynamic_list:
func_path: my_tool_package.tools.tool_with_dynamic_list_input.list_endpoint_names
func_kwargs:
- name: prefix
type:
- string
optional: true
reference: ${inputs.input_prefix}
allow_manual_entry: false
is_multi_select: false
module: my_tool_package.tools.tool_with_dynamic_list_input
name: My Tool with Dynamic List Input
description: This is my tool with dynamic list input
type: python
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/my_tool_2.yaml | my_tool_package.tools.my_tool_2.MyTool.my_tool:
class_name: MyTool
function: my_tool
inputs:
connection:
type:
- CustomConnection
input_text:
type:
- string
module: my_tool_package.tools.my_tool_2
name: My Second Tool
description: This is my second tool
type: python
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/my_tool_1.yaml | my_tool_package.tools.my_tool_1.my_tool:
function: my_tool
inputs:
connection:
type:
- CustomConnection
input_text:
type:
- string
module: my_tool_package.tools.my_tool_1
name: My First Tool
description: This is my first tool
type: python
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_cascading_inputs.yaml | my_tool_package.tools.tool_with_cascading_inputs.my_tool:
function: my_tool
inputs:
user_type:
type:
- string
enum:
- student
- teacher
student_id:
type:
- string
enabled_by: user_type
enabled_by_value: [student]
teacher_id:
type:
- string
enabled_by: user_type
enabled_by_value: [teacher]
module: my_tool_package.tools.tool_with_cascading_inputs
name: My Tool with Cascading Inputs
description: This is my tool with cascading inputs
type: python | 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_file_path_input.yaml | my_tool_package.tools.tool_with_file_path_input.my_tool:
function: my_tool
inputs:
input_file:
type:
- file_path
input_text:
type:
- string
module: my_tool_package.tools.tool_with_file_path_input
name: Tool with FilePath Input
description: This is a tool to demonstrate the usage of FilePath input
type: python
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_custom_strong_type_connection.yaml | my_tool_package.tools.tool_with_custom_strong_type_connection.my_tool:
description: This is my tool with custom strong type connection.
function: my_tool
inputs:
connection:
custom_type:
- MyCustomConnection
type:
- CustomConnection
input_text:
type:
- string
module: my_tool_package.tools.tool_with_custom_strong_type_connection
name: Tool With Custom Strong Type Connection
type: python
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_my_tool_2.py | import pytest
import unittest
from promptflow.connections import CustomConnection
from my_tool_package.tools.my_tool_2 import MyTool
@pytest.fixture
def my_custom_connection() -> CustomConnection:
    """A fake CustomConnection; the tool under test never calls a real API."""
    return CustomConnection(
        {
            "api-key": "my-api-key",
            "api-secret": "my-api-secret",
            "api-url": "my-api-url",
        }
    )


@pytest.fixture
def my_tool_provider(my_custom_connection) -> MyTool:
    """A MyTool provider wired with the fake connection."""
    return MyTool(my_custom_connection)


class TestMyTool2:
    def test_my_tool_2(self, my_tool_provider: MyTool):
        result = my_tool_provider.my_tool(input_text="Microsoft")
        assert result == "Hello Microsoft"


if __name__ == "__main__":
    # These are pytest-style tests (fixtures, non-TestCase class);
    # unittest.main() would collect nothing, so invoke pytest directly.
    pytest.main([__file__])
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_file_path_input.py | import pytest
import unittest
from promptflow.contracts.types import FilePath
from my_tool_package.tools.tool_with_file_path_input import my_tool
@pytest.fixture
def my_file_path_input() -> FilePath:
    # NOTE(review): the dotted name makes Path(...).stem yield
    # "tests.test_utils.hello_method", which importlib resolves as a module
    # path when tests run from the repo root — confirm against the tool code.
    return FilePath("tests.test_utils.hello_method.py")


class TestToolWithFilePathInput:
    def test_tool_with_file_path_input(self, my_file_path_input):
        result = my_tool(my_file_path_input, input_text="Microsoft")
        assert result == "Hello Microsoft"


if __name__ == "__main__":
    # pytest-style tests; unittest.main() would collect nothing,
    # so invoke pytest directly.
    pytest.main([__file__])
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_custom_strong_type_connection.py | import pytest
import unittest
from my_tool_package.tools.tool_with_custom_strong_type_connection import MyCustomConnection, my_tool
@pytest.fixture
def my_custom_connection() -> MyCustomConnection:
    """A fake strong-typed connection; no real API is called by the tool."""
    return MyCustomConnection(
        {
            "api_key": "my-api-key",
            "api_base": "my-api-base",
        }
    )


class TestMyToolWithCustomStrongTypeConnection:
    def test_my_tool(self, my_custom_connection):
        result = my_tool(my_custom_connection, input_text="Microsoft")
        assert result == "Hello Microsoft"


if __name__ == "__main__":
    # pytest-style tests; unittest.main() would collect nothing,
    # so invoke pytest directly.
    pytest.main([__file__])
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_cascading_inputs.py | from my_tool_package.tools.tool_with_cascading_inputs import my_tool
def test_my_tool():
    """my_tool echoes back student_id when user_type is "student"."""
    result = my_tool(user_type="student", student_id="student_id")
    # The tool returns the student_id argument itself, not a fixed value;
    # the previous expectation of '123' could never pass.
    assert result == "student_id"
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_my_tool_1.py | import pytest
import unittest
from promptflow.connections import CustomConnection
from my_tool_package.tools.my_tool_1 import my_tool
@pytest.fixture
def my_custom_connection() -> CustomConnection:
    """A fake CustomConnection; my_tool never calls a real API."""
    return CustomConnection(
        {
            "api-key": "my-api-key",
            "api-secret": "my-api-secret",
            "api-url": "my-api-url",
        }
    )


class TestMyTool1:
    def test_my_tool_1(self, my_custom_connection):
        result = my_tool(my_custom_connection, input_text="Microsoft")
        assert result == "Hello Microsoft"


if __name__ == "__main__":
    # pytest-style tests (fixtures, non-TestCase class); unittest.main()
    # would collect nothing, so invoke pytest directly.
    pytest.main([__file__])
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_dynamic_input.py | from my_tool_package.tools.tool_with_dynamic_list_input import my_tool, my_list_func
def test_my_tool():
    """my_tool requires prefix, text list and endpoint name.

    The previous call omitted the required endpoint_name argument (TypeError)
    and expected a string without the endpoint suffix.
    """
    result = my_tool(input_prefix="My", input_text=["apple", "banana"], endpoint_name="ep")
    assert result == "Hello My apple,banana ep"


def test_my_list_func():
    result = my_list_func(prefix="My")
    # Default size is 10; every item must at least carry the backend "value".
    assert len(result) == 10
    assert "value" in result[0]
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_generated_by_input.py | import json
import pytest
import unittest
from my_tool_package.tools.tool_with_generated_by_input import (
generate_index_json,
list_embedding_deployment,
list_fields,
list_indexes,
list_index_types,
list_semantic_configuration,
my_tool,
reverse_generate_index_json,
)
@pytest.mark.parametrize("index_type", ["Azure Cognitive Search", "Workspace MLIndex"])
def test_my_tool(index_type):
    """The tool should echo the generated index JSON for every index type."""
    index_json = generate_index_json(index_type=index_type)
    result = my_tool(index_json, "", "")
    assert result == f'Hello {index_json}'


def test_generate_index_json():
    index_type = "Azure Cognitive Search"
    index_json = generate_index_json(index_type=index_type)
    indexes = json.loads(index_json)
    assert indexes["index_type"] == index_type


def test_reverse_generate_index_json():
    """reverse_generate_index_json must round-trip every generated field."""
    index_type = "Workspace MLIndex"
    index = list_indexes("", "", "")
    inputs = {
        "index_type": index_type,
        "index": index,
        "index_connection": "retrieved_index_connection",
        "index_name": "retrieved_index_name",
        "content_field": "retrieved_content_field",
        "embedding_field": "retrieved_embedding_field",
        "metadata_field": "retrieved_metadata_field",
        "semantic_configuration": "retrieved_semantic_configuration",
        "embedding_connection": "retrieved_embedding_connection",
        "embedding_deployment": "retrieved_embedding_deployment"
    }
    input_json = json.dumps(inputs)
    result = reverse_generate_index_json(input_json)
    for k, v in inputs.items():
        assert result[k] == v


def test_list_index_types():
    result = list_index_types("", "", "")
    assert isinstance(result, list)
    assert len(result) == 5


def test_list_indexes():
    result = list_indexes("", "", "")
    assert isinstance(result, list)
    assert len(result) == 10
    for item in result:
        assert isinstance(item, dict)


def test_list_fields():
    result = list_fields("", "", "")
    assert isinstance(result, list)
    assert len(result) == 9
    for item in result:
        assert isinstance(item, dict)


def test_list_semantic_configuration():
    result = list_semantic_configuration("", "", "")
    assert len(result) == 1
    assert isinstance(result[0], dict)


def test_list_embedding_deployment():
    result = list_embedding_deployment("")
    assert len(result) == 2
    for item in result:
        assert isinstance(item, dict)


if __name__ == "__main__":
    # These are pytest-style test functions; unittest.main() would not
    # collect them, so invoke pytest directly instead.
    pytest.main([__file__])
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_custom_llm_type.py | import pytest
import unittest
from promptflow.connections import CustomConnection
from my_tool_package.tools.tool_with_custom_llm_type import my_tool
@pytest.fixture
def my_custom_connection() -> CustomConnection:
    """A fake CustomConnection; the custom LLM tool only renders a template."""
    return CustomConnection(
        {
            "api-key": "my-api-key",
            "api-secret": "my-api-secret",
            "api-url": "my-api-url",
        }
    )


class TestToolWithCustomLLMType:
    def test_tool_with_custom_llm_type(self, my_custom_connection):
        result = my_tool(my_custom_connection, "Hello {{text}}", text="Microsoft")
        assert result == "Hello Microsoft"


if __name__ == "__main__":
    # pytest-style tests; unittest.main() would collect nothing,
    # so invoke pytest directly.
    pytest.main([__file__])
| 0 |
promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests | promptflow_repo/promptflow/examples/tools/tool-package-quickstart/tests/test_utils/hello_method.py | def hello(input_text: str) -> str:
# Replace with your own code.
return "Hello " + input_text
| 0 |
promptflow_repo/promptflow/examples/flows | promptflow_repo/promptflow/examples/flows/integrations/README.md | # Integrations Folder
This folder contains flow examples contributed by various contributors. Each flow example should have a README.md file that provides a comprehensive introduction to the flow and includes contact information for the flow owner.
# Guideline for README.md of flows
To ensure consistency and clarity, please follow the guidelines below when creating the README.md file for your flow example. You can also refer to the [README.md](../standard/web-classification/README.md) file in the [web-classification](../standard/web-classification) flow example as a reference.
Note: The sample README.md above may not include contact information because it is a shared example; users can open issues in this repository if they have questions about that flow example. For integration samples, **please make sure to include contact information in your README.md file**.
## Introduction (Required)
Provide a detailed description of the flow, including its components, inputs, outputs, and any dependencies. Explain how the flow works and what problem it solves. This section should give users a clear understanding of the flow's functionality and how it can be used.
## Tools Used in this Flow (Required)
List all the tools (functions) used in the flow. This can include both standard tools provided by prompt flow and any custom tools created specifically for the flow. Include a brief description of each tool and its purpose within the flow.
## Pre-requisites (Required)
List any pre-requisites that are required to run the flow. This can include any specific versions of prompt flow or other dependencies. If there are any specific configurations or settings that need to be applied, make sure to mention them in this section.
## Getting Started (Required)
Provide step-by-step instructions on how to get started with the flow. This should include any necessary setup or configuration steps, such as installing dependencies or setting up connections. If there are specific requirements or prerequisites, make sure to mention them in this section.
## Usage Examples
Include usage examples that demonstrate how to run the flow and provide input data. This can include command-line instructions or code snippets. Show users how to execute the flow and explain the expected output or results.
## Troubleshooting
If there are any known issues or troubleshooting tips related to the flow, include them in this section. Provide solutions or workarounds for common problems that users may encounter. This will help users troubleshoot issues on their own and reduce the need for support.
## Contribution Guidelines
If you would like to encourage other users to contribute to your flow or provide guidelines for contributing to the integration folder, include a section with contribution guidelines. This can include instructions on how to submit pull requests, guidelines for code formatting, or any other relevant information.
## Contact (Required)
Specify the flow owner and provide contact information in the README.md file. This can include an email address, GitHub username, or any other preferred method of contact. By including this information, users will be able to reach out to the owner with any questions or issues related to the flow.
# Conclusion
By following these guidelines, you can create a well-structured and informative README.md file for your flow example. This will help users understand and utilize your flow effectively. If you have any further questions or need assistance, please don't hesitate to reach out. Happy contributing!
| 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/connections/azure_ai_language.yml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
name: azure_ai_language_connection
type: custom
configs:
endpoint: "<azure-language-resource-endpoint>"
secrets:
api_key: "<to-be-replaced>" | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/connections/azure_ai_translator.yml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
name: azure_ai_translator_connection
type: custom
configs:
endpoint: "<azure-translator-resource-endpoint>"
region: "<azure-translator-resource-region>"
secrets:
api_key: "<to-be-replaced>" | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/data.jsonl | {"document_path": "./document1.txt", "language": "en"}
{"document_path": "./document2.txt", "language": "en"} | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/README.md | # Analyze Documents
A flow that analyzes documents with various language-based Machine Learning models.
This sample flow utilizes Azure AI Language's pre-built and optimized language models to perform various analyses on text or documents. It performs:
- [Translation](https://learn.microsoft.com/en-us/rest/api/cognitiveservices/translator/translator/translate?view=rest-cognitiveservices-translator-v3.0&tabs=HTTP)
- [Personally Identifiable Information (PII) detection](https://learn.microsoft.com/en-us/azure/ai-services/language-service/personally-identifiable-information/overview)
- [Named Entity Recognition (NER)](https://learn.microsoft.com/en-us/azure/ai-services/language-service/named-entity-recognition/overview)
- [Document Summarization](https://learn.microsoft.com/en-us/azure/ai-services/language-service/summarization/overview?tabs=document-summarization)
- [Sentiment Analysis & Opinion Mining](https://learn.microsoft.com/en-us/azure/ai-services/language-service/sentiment-opinion-mining/overview?tabs=prebuilt)
See the [promptflow-azure-ai-language](https://github.com/microsoft/promptflow/blob/main/docs/integrations/tools/azure_ai_language_tool.md) tool package reference documentation for further information.
Tools used in this flow:
- `python` tool
- `translator` tool from the `promptflow-azure-ai-language` package
- `pii_entity_recognition` tool from the `promptflow-azure-ai-language` package
- `abstractive_summarization` tool from the `promptflow-azure-ai-language` package
- `extractive_summarization` tool from the `promptflow-azure-ai-language` package
- `entity_recognition` tool from the `promptflow-azure-ai-language` package
- `sentiment_analysis` tool from the `promptflow-azure-ai-language` package
Connections used in this flow:
- `Custom` connection (Azure AI Language)
- `Custom` connection (Azure AI Translator)
## Prerequisites
Install promptflow sdk and other dependencies:
```
pip install -r requirements.txt
```
## Setup connection
Prepare your [Azure AI Language Resource](https://azure.microsoft.com/en-us/products/ai-services/ai-language) first, and [create a Language Resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics) if necessary. From your Language Resource, obtain its `api_key` and `endpoint`.
Create a connection to your Language Resource. The connection uses the `CustomConnection` schema:
```
# Override keys with --set to avoid yaml file changes
pf connection create -f ../connections/azure_ai_language.yml --set secrets.api_key=<your_api_key> configs.endpoint=<your_endpoint> name=azure_ai_language_connection
```
Ensure you have created the `azure_ai_language_connection`:
```
pf connection show -n azure_ai_language_connection
```
To use the `translator` tool, you must have an existing [Azure AI Translator resource](https://azure.microsoft.com/en-us/products/ai-services/ai-translator). [Create a Translator resource](https://learn.microsoft.com/en-us/azure/ai-services/translator/create-translator-resource) first, if necessary. From your Translator Resource, obtain its `api_key`, `endpoint`, and `region` (if applicable).
Create a connection to your Translator Resource. The connection uses the `CustomConnection` schema:
```
# Override keys with --set to avoid yaml file changes
pf connection create -f ../connections/azure_ai_translator.yml --set secrets.api_key=<your_api_key> configs.endpoint=<your_endpoint> configs.region=<your_region> name=azure_ai_translator_connection
```
Ensure you have created the `azure_ai_translator_connection`:
```
pf connection show -n azure_ai_translator_connection
```
## Run flow
### Run with single line input
```
# Test with default input values in flow.dag.yaml:
pf flow test --flow .
# Test with specific input:
pf flow test --flow . --inputs document_path=<path_to_txt_file> language=<document_language_code>
```
### Run with multiple lines of data
```
pf run create --flow . --data ./data.jsonl --column-mapping document_path='${data.document_path}' language='${data.language}' --stream
```
You can also skip providing column-mapping if provided data has same column name as the flow. Reference [here](https://microsoft.github.io/promptflow/how-to-guides/run-and-evaluate-a-flow/use-column-mapping.html) for default behavior when column-mapping not provided in CLI.
### Flow description
The flow first uses a `python` node to read in the provided `.txt` file into a string. This string is passed to a `pii_entity_recognition` node where Personally Identifiable Information (PII) is redacted. The redacted text is passed to `abstractive_summarization`, `extractive_summarization` and `entity_recognition` nodes, where summaries and named-entities are obtained. Finally, the generated abstractive summary is forwarded to a `sentiment_analysis` node to analyze its general sentiment.
### Contact
Please reach out to Sean Murray (<[email protected]>) or <[email protected]> with any issues. | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/read_file.py | from promptflow import tool
@tool
def read_file(file_path: str) -> str:
    """
    This tool opens a file and reads its contents into a string.
    :param file_path: the file path of the file to be read.
    """
    # Open in text mode with UTF-8 so multilingual documents decode correctly.
    with open(file_path, mode='r', encoding="utf8") as handle:
        return handle.read()
| 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/parse_translation.py | from promptflow import tool
@tool
def parse_translation(translation_results: dict, language: str) -> str:
    """
    Pick the translation for a single target language out of the
    translator tool's result mapping.
    :param translation_results: mapping of language code -> translated text.
    :param language: language code whose translation should be returned.
    """
    # A missing language code raises KeyError, same as direct indexing.
    translated_text = translation_results[language]
    return translated_text
| 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/document2.txt | Siemens and Microsoft partner to drive cross-industry AI adoption
October 31, 2023 | Microsoft News Center
Share on Facebook (opens new window)
Share on LinkedIn (opens new window)
Share on Twitter (opens new window)
Companies introduce Siemens Industrial Copilot, a generative AI-powered assistant, designed to enhance human-machine collaboration and boost productivity.
Companies will work together to build additional copilots for manufacturing, infrastructure, transportation, and healthcare industries.
Leading automotive supplier, Schaeffler AG, is an early adopter of Siemens Industrial Copilot.
In addition, the Siemens Teamcenter app for Microsoft Teams will be generally available in December 2023 and accelerate innovation across the product lifecycle.
Woman refers to what's on her mobile device as she works in an environment with many wires and cables. Imagery with graphs and numbers is superimposed on the photo.
Siemens and Microsoft partner to drive cross-industry AI adoption
Microsoft and Siemens are deepening their partnership by bringing the benefits of generative AI to industries worldwide. As a first step, the companies are introducing Siemens Industrial Copilot, an AI-powered jointly developed assistant aimed at improving human-machine collaboration in manufacturing. In addition, the launch of the integration between Siemens Teamcenter software for product lifecycle management and Microsoft Teams will further pave the way to enabling the industrial metaverse. It will simplify virtual collaboration of design engineers, frontline workers, and other teams across business functions.
"With this next generation of AI, we have a unique opportunity to accelerate innovation across the entire industrial sector," said Satya Nadella, Chairman and CEO, Microsoft. "We're building on our longstanding collaboration with Siemens and bringing together AI advances across the Microsoft Cloud with Siemens' industrial domain expertise to empower both frontline and knowledge workers with new, AI-powered tools, starting with Siemens Industrial Copilot."
"Together with Microsoft, our shared vision is to empower customers with the adoption of generative AI," says Roland Busch, CEO of Siemens AG. "This has the potential to revolutionize the way companies design, develop, manufacture, and operate. Making human-machine collaboration more widely available allows engineers to accelerate code development, increase innovation and tackle skilled labor shortages."
A new era of human-machine collaboration
Siemens Industrial Copilot will allow users to rapidly generate, optimize and debug complex automation code, and significantly shorten simulation times. This will reduce a task that previously took weeks to minutes. The copilot ingests automation and process simulation information from Siemens' open digital business platform, Siemens Xcelerator, and enhances it with Microsoft's Azure OpenAI Service. Customers maintain full control over their data, and it is not used to train underlying AI models.
Siemens Industrial Copilot promises to boost productivity and efficiency across the industrial lifecycle. Using natural language, maintenance staff can be assisted with detailed repair instructions and engineers with quick access to simulation tools.
The vision: Copilots for all industries
The companies envision AI copilots assisting professionals in various industries, including manufacturing, infrastructure, transportation, and healthcare. Numerous copilots are already planned in the manufacturing sectors, such as automotive, consumer package goods and machine building.
Schaeffler AG, a leading automotive supplier, is among the first in the automotive industry to embrace generative AI in the engineering phase. This helps its engineers to generate reliable code for programming industrial automation systems such as robots. In addition, the company intends to incorporate Siemens Industrial Copilot during their own operations, aiming to significantly reduce downtimes, and also for their clients at a later stage.
"With this joint pilot, we're stepping into a new age of productivity and innovation. This Siemens Industrial Copilot will help our team work more efficiently, reduce repetitive tasks, and unleash creativity. We're excited to partner with Siemens and Microsoft on this project," says Klaus Rosenfeld, CEO of Schaeffler Group.
Generative AI facilitates virtual collaboration
To bring virtual collaboration across teams to the next level, Teamcenter for Microsoft Teams will be generally available beginning December 2023. This new app uses the latest advances in generative AI to connect functions across the product design and manufacturing lifecycle such as frontline workers to engineering teams. It connects Siemens' Teamcenter software for product lifecycle management (PLM) with Microsoft's collaboration platform Teams to make data more accessible for factory and field service workers. This will enable millions of workers who do not have access to PLM tools today to contribute to the design and manufacturing process more easily as part of their daily work.
Siemens will share more details on Siemens Industrial Copilot at the SPS expo in Nuremberg, Germany, in November 2023.
Siemens AG
Siemens AG (Berlin and Munich) is a technology company focused on industry, infrastructure, transport, and healthcare. From more resource-efficient factories, resilient supply chains, and smarter buildings and grids, to cleaner and more comfortable transportation as well as advanced healthcare, the company creates technology with purpose adding real value for customers. By combining the real and the digital worlds, Siemens empowers its customers to transform their industries and markets, helping them to transform the everyday for billions of people. Siemens also owns a majority stake in the publicly listed company Siemens Healthineers, a globally leading medical technology provider shaping the future of healthcare. In addition, Siemens holds a minority stake in Siemens Energy, a global leader in the transmission and generation of electrical power.
In fiscal 2022, which ended on September 30, 2022, the Siemens Group generated revenue of €72.0 billion and net income of €4.4 billion. As of September 30, 2022, the company employed around 311,000 people worldwide. Further information is available on the Internet at www.siemens.com.
Siemens global Website
Discover Siemens as a strong partner, technological pioneer and responsible employer.
About Microsoft
Microsoft (Nasdaq "MSFT" @microsoft) enables digital transformation for the era of an intelligent cloud and an intelligent edge. Its mission is to empower every person and every organization on the planet to achieve more. | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/document1.txt | La fortaleza de Microsoft Cloud impulsa los resultados del primer trimestre
24 de octubre de 2023 | Centro de noticias de Microsoft
Compartir en Facebook (se abre en una ventana nueva)
Compartir en LinkedIn (se abre en una ventana nueva)
Compartir en Twitter (se abre en una ventana nueva)
REDMOND, Washington — 24 de octubre de 2023 — Microsoft Corp. anunció hoy los siguientes resultados para el trimestre finalizado el 30 de septiembre de 2023, en comparación con el período correspondiente del año fiscal anterior:
Los ingresos fueron de 56.500 millones de dólares y aumentaron un 13% (un 12% más en moneda constante)
Los ingresos operativos fueron de 26.900 millones de dólares y aumentaron un 25% (un 24% más en moneda constante)
Los ingresos netos fueron de 22.300 millones de dólares y aumentaron un 27% (un 26% más en moneda constante)
El beneficio diluido por acción fue de 2,99 dólares y aumentó un 27% (un 26% más en moneda constante)
"Con los copilotos, estamos haciendo que la era de la IA sea real para las personas y las empresas de todo el mundo", dijo Satya Nadella, presidente y director ejecutivo de Microsoft. "Estamos infundiendo rápidamente la IA en todas las capas de la pila tecnológica y para cada función y proceso empresarial para impulsar el aumento de la productividad de nuestros clientes".
"La ejecución consistente por parte de nuestros equipos de ventas y socios impulsó un fuerte comienzo del año fiscal con ingresos de Microsoft Cloud de 31.800 millones de dólares, un aumento del 24% (un 23% más en moneda constante) año tras año", dijo Amy Hood, vicepresidenta ejecutiva y directora financiera de Microsoft.
Aspectos destacados del negocio
Los ingresos en Productividad y Procesos de Negocio fueron de 18.600 millones de dólares y aumentaron un 13% (un 12% más en moneda constante), con los siguientes aspectos destacados del negocio:
Los ingresos por productos comerciales y servicios en la nube de Office aumentaron un 15 % (un 14 % en moneda constante) impulsados por un crecimiento de los ingresos comerciales de Office 365 del 18 % (un 17 % en moneda constante)
Los ingresos de productos de Office Consumer y servicios en la nube aumentaron un 3 % (un 4 % más en moneda constante) y los suscriptores de Microsoft 365 Consumer crecieron hasta los 76,7 millones
Los ingresos de LinkedIn aumentaron un 8%
Los ingresos por productos y servicios en la nube de Dynamics aumentaron un 22 % (un 21 % en moneda constante) impulsados por un crecimiento de los ingresos de Dynamics 365 del 28 % (un 26 % más en moneda constante)
Los ingresos en Intelligent Cloud fueron de 24.300 millones de dólares y aumentaron un 19%, con los siguientes aspectos destacados del negocio:
Los ingresos por productos de servidor y servicios en la nube aumentaron un 21 % impulsados por el crecimiento de los ingresos de Azure y otros servicios en la nube del 29 % (un 28 % más en moneda constante)
Los ingresos en More Personal Computing fueron de 13.700 millones de dólares y aumentaron un 3% (un 2% más en moneda constante), con los siguientes aspectos comerciales destacados:
Los ingresos de Windows aumentaron un 5 % con un crecimiento de los ingresos de OEM de Windows del 4 % y un crecimiento de los ingresos de productos comerciales y servicios en la nube de Windows del 8 %
Los ingresos por dispositivos disminuyeron un 22%
Los ingresos por contenido y servicios de Xbox aumentaron un 13 % (un 12 % más en moneda constante)
Los ingresos por publicidad en búsquedas y noticias, excluyendo los costes de adquisición de tráfico, aumentaron un 10 % (un 9 % más en moneda constante)
Microsoft devolvió 9.100 millones de dólares a los accionistas en forma de recompras de acciones y dividendos en el primer trimestre del año fiscal 2024.
Perspectivas de negocio
Microsoft proporcionará orientación prospectiva en relación con este anuncio de ganancias trimestrales en su conferencia telefónica y webcast de ganancias.
Aspectos destacados trimestrales, lanzamientos de productos y mejoras
Cada trimestre, Microsoft ofrece cientos de productos, ya sea como nuevas versiones, servicios o mejoras de los productos y servicios actuales. Estos lanzamientos son el resultado de importantes inversiones en investigación y desarrollo, realizadas a lo largo de varios años, diseñadas para ayudar a los clientes a ser más productivos y seguros y para ofrecer un valor diferenciado en la nube y el perímetro.
Estos son los principales lanzamientos de productos y otros aspectos destacados del trimestre, organizados por categorías de productos, para ayudar a ilustrar cómo estamos acelerando la innovación en nuestros negocios al tiempo que ampliamos nuestras oportunidades de mercado.
Ambientales, Sociales y de Gobernanza (ESG)
Para ejecutar mejor la misión de Microsoft, enfocamos nuestros esfuerzos ambientales, sociales y de gobernanza (ESG) donde podemos tener el impacto más positivo. Para obtener más información sobre nuestras últimas iniciativas y prioridades, visite nuestro sitio web de relaciones con los inversores ESG.
Webcast Details
Satya Nadella, chairman and chief executive officer, Amy Hood, executive vice president and chief financial officer, Alice Jolla, chief accounting officer, Keith Dolliver, corporate secretary and deputy general counsel, and Brett Iversen, vice president of investor relations, will host a conference call and webcast at 2:30 p.m. Pacific time (5:30 p.m. Eastern time) today to discuss details of the company’s performance for the quarter and certain forward-looking information. The session may be accessed at http://www.microsoft.com/en-us/investor. The webcast will be available for replay through the close of business on October 24, 2024.
Constant Currency
Microsoft presents constant currency information to provide a framework for assessing how our underlying businesses performed excluding the effect of foreign currency rate fluctuations. To present this information, current and comparative prior period results for entities reporting in currencies other than United States dollars are converted into United States dollars using the average exchange rates from the comparative period rather than the actual exchange rates in effect during the respective periods. All growth comparisons relate to the corresponding period in the last fiscal year. Microsoft has provided this non-GAAP financial information to aid investors in better understanding our performance. The non-GAAP financial measures presented in this release should not be considered as a substitute for, or superior to, the measures of financial performance prepared in accordance with GAAP.
Financial Performance Constant Currency Reconciliation
Three Months Ended September 30,
($ in millions, except per share amounts) Revenue Operating Income Net Income Diluted Earnings per Share
2022 As Reported (GAAP) $50,122 $21,518 $17,556 $2.35
2023 As Reported (GAAP) $56,517 $26,895 $22,291 $2.99
Percentage Change Y/Y (GAAP) 13% 25% 27% 27%
Constant Currency Impact $301 $204 $148 $0.02
Percentage Change Y/Y Constant Currency 12% 24% 26% 26%
Segment Revenue Constant Currency Reconciliation
Three Months Ended September 30,
($ in millions) Productivity and Business Processes Intelligent Cloud More Personal Computing
2022 As Reported (GAAP) $16,465 $20,325 $13,332
2023 As Reported (GAAP) $18,592 $24,259 $13,666
Percentage Change Y/Y (GAAP) 13% 19% 3%
Constant Currency Impact $79 $156 $66
Variación porcentual a/año moneda constante 12% 19% 2%
Conciliación de moneda constante de ingresos por productos y servicios seleccionados
Tres meses finalizados el 30 de septiembre de 2023
Cambio porcentual interanual (GAAP) Impacto de la moneda constante Variación porcentual a/año moneda constante
Nube de Microsoft 24% (1)% 23%
Productos comerciales de oficina y servicios en la nube 15% (1)% 14%
Office 365 Comercial 18% (1)% 17%
Productos de consumo de Office y servicios en la nube 3% 1% 4%
LinkedIn (en inglés) 8% 0% 8%
Productos de Dynamics y servicios en la nube 22% (1)% 21%
Dynamics 365 28% (2)% 26%
Productos de servidor y servicios en la nube 21% 0% 21%
Azure y otros servicios en la nube 29% (1)% 28%
Windows 5% 0% 5%
Windows OEM 4% 0% 4%
Productos comerciales y servicios en la nube de Windows 8% 0% 8%
Dispositivos (22)% 0% (22)%
Contenido y servicios de Xbox 13% (1)% 12%
Publicidad de búsqueda y noticias, excluidos los costes de adquisición de tráfico 10% (1)% 9%
Acerca de Microsoft
Microsoft (Nasdaq "MSFT" @microsoft) permite la transformación digital para la era de una nube inteligente y un perímetro inteligente. Su misión es empoderar a todas las personas y organizaciones del planeta para que logren más.
Declaraciones prospectivas
Las declaraciones en este comunicado que son "declaraciones prospectivas" se basan en expectativas y suposiciones actuales que están sujetas a riesgos e incertidumbres. Los resultados reales podrían diferir materialmente debido a factores como:
una intensa competencia en todos nuestros mercados que puede conducir a menores ingresos o márgenes operativos;
un mayor enfoque en los servicios basados en la nube que presentan riesgos competitivos y de ejecución;
inversiones significativas en productos y servicios que pueden no alcanzar los rendimientos esperados;
adquisiciones, empresas conjuntas y alianzas estratégicas que puedan tener un efecto adverso en nuestro negocio;
deterioro del fondo de comercio o de activos intangibles amortizables que causen un cargo significativo a los resultados;
ciberataques y vulnerabilidades de seguridad que podrían conducir a una reducción de los ingresos, un aumento de los costes, reclamaciones de responsabilidad o daños a nuestra reputación o posición competitiva;
divulgación y uso indebido de datos personales que puedan causar responsabilidad y daño a nuestra reputación;
la posibilidad de que no podamos proteger la información almacenada en nuestros productos y servicios para que no la utilicen otros;
abuso de nuestras plataformas publicitarias, profesionales, de mercado o de juegos que puedan dañar nuestra reputación o la participación de los usuarios;
el desarrollo del internet de las cosas que presenta riesgos de seguridad, privacidad y ejecución;
cuestiones sobre el uso de la inteligencia artificial en nuestras ofertas que puedan dar lugar a daños a la reputación o a la competencia, o a responsabilidades legales;
interrupciones excesivas, pérdidas de datos e interrupciones de nuestros servicios en línea si no mantenemos una infraestructura operativa adecuada;
problemas de calidad o de abastecimiento;
litigios gubernamentales y actividades regulatorias relacionadas con las normas de competencia que pueden limitar la forma en que diseñamos y comercializamos nuestros productos;
las posibles consecuencias de las leyes comerciales y anticorrupción;
las posibles consecuencias de los requisitos legales y reglamentarios existentes y crecientes;
leyes y reglamentos relacionados con el manejo de datos personales que puedan impedir la adopción de nuestros servicios o resultar en un aumento de los costos, reclamos legales, multas o daños a la reputación;
reclamaciones contra nosotros que puedan dar lugar a resultados adversos en disputas legales;
incertidumbres relacionadas con nuestro negocio con clientes gubernamentales;
obligaciones tributarias adicionales;
la incapacidad de proteger y utilizar nuestra propiedad intelectual puede perjudicar nuestros resultados comerciales y operativos;
reclamaciones de que Microsoft ha infringido los derechos de propiedad intelectual de terceros;
daños a nuestra reputación o a nuestras marcas que puedan perjudicar nuestro negocio y nuestros resultados operativos;
condiciones económicas o de mercado adversas que puedan perjudicar nuestro negocio;
eventos catastróficos o condiciones geopolíticas, como la pandemia de COVID-19, que pueden interrumpir nuestro negocio;
exposición a mayores incertidumbres económicas y operativas derivadas de la operación de un negocio global, incluidos los efectos del cambio de moneda extranjera y
la dependencia de nuestro negocio de nuestra capacidad para atraer y retener empleados talentosos.
Para obtener más información sobre los riesgos e incertidumbres asociados con el negocio de Microsoft, consulte las secciones "Discusión y análisis de la administración de la situación financiera y los resultados de las operaciones" y "Factores de riesgo" de las presentaciones de Microsoft ante la SEC, incluidos, entre otros, su informe anual en el Formulario 10-K y los informes trimestrales en el Formulario 10-Q, cuyas copias se pueden obtener poniéndose en contacto con el departamento de Relaciones con los Inversores de Microsoft al (800) 285-7772 o en Relaciones con los inversores de Microsoft sitio web en http://www.microsoft.com/en-us/investor.
Toda la información de este comunicado corresponde al 30 de septiembre de 2023. La compañía no asume ninguna obligación de actualizar ninguna declaración prospectiva para ajustarla a los resultados reales o a los cambios en las expectativas de la compañía.
Para obtener más información, presione solamente:
Relaciones con los medios de comunicación de Microsoft, WE Communications for Microsoft, (425) 638-7777, [email protected]
Para obtener más información, solo analistas financieros e inversionistas:
Brett Iversen, Vicepresidente de Relaciones con Inversionistas, (425) 706-4400
Nota a los editores: Para obtener más información, noticias y perspectivas de Microsoft, visite el Centro de noticias de Microsoft en http://www.microsoft.com/news. Los enlaces web, los números de teléfono y los títulos eran correctos en el momento de la publicación, pero pueden haber cambiado desde entonces. Información financiera y de accionistas, así como la de hoy a las 2:30 p.m. La conferencia telefónica con inversores y analistas, hora del Pacífico, está disponible en http://www.microsoft.com/en-us/investor. | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/requirements.txt | promptflow
promptflow-tools
promptflow-azure-ai-language | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
python_requirements_txt: requirements.txt
inputs:
document_path:
type: string
default: ./document1.txt
language:
type: string
default: en
outputs:
extractive_summary:
type: string
reference: ${Extractive_Summarization.output}
abstractive_summary:
type: string
reference: ${Abstractive_Summarization.output}
sentiment:
type: string
reference: ${Sentiment_Analysis.output}
recognized_entities:
type: string
reference: ${Entity_Recognition.output}
nodes:
- name: Read_File
type: python
source:
type: code
path: read_file.py
inputs:
file_path: ${inputs.document_path}
- name: Translator
type: python
source:
type: package
tool: language_tools.tools.translator.get_translation
inputs:
connection: azure_ai_translator_connection
text: ${Read_File.output}
to:
- en
parse_response: true
- name: Parse_Translation
type: python
source:
type: code
path: parse_translation.py
inputs:
translation_results: ${Translator.output}
language: en
- name: PII_Entity_Recognition
type: python
source:
type: package
tool: language_tools.tools.pii_entity_recognition.get_pii_entity_recognition
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${Parse_Translation.output}
parse_response: true
categories:
- Address
- Age
- Date
- Email
- IPAddress
- PhoneNumber
- URL
- name: Abstractive_Summarization
type: python
source:
type: package
tool: language_tools.tools.abstractive_summarization.get_abstractive_summarization
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${PII_Entity_Recognition.output}
parse_response: true
query: quarterly results
summary_length: medium
- name: Sentiment_Analysis
type: python
source:
type: package
tool: language_tools.tools.sentiment_analysis.get_sentiment_analysis
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${Abstractive_Summarization.output}
parse_response: true
- name: Entity_Recognition
type: python
source:
type: package
tool: language_tools.tools.entity_recognition.get_entity_recognition
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${PII_Entity_Recognition.output}
parse_response: true
- name: Extractive_Summarization
type: python
source:
type: package
tool: language_tools.tools.extractive_summarization.get_extractive_summarization
inputs:
connection: azure_ai_language_connection
language: ${inputs.language}
text: ${PII_Entity_Recognition.output}
query: Cloud AI
parse_response: true
| 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/multi_intent_conversational_language_understanding/README.md | # Multi Intent Conversational Language Understanding
A flow that can be used to determine multiple intents in a user query leveraging an LLM with Conversational Language Understanding.
This sample flow utilizes Azure AI Language's Conversational Language Understanding to perform various analyses on text or documents. It performs:
- Breakdown of compound multi intent user queries into single user queries using an LLM.
- [Conversational Language Understanding](https://learn.microsoft.com/en-us/azure/ai-services/language-service/conversational-language-understanding/overview) on each of those single user queries.
See the [promptflow-azure-ai-language](https://github.com/microsoft/promptflow/blob/main/docs/integrations/tools/azure_ai_language_tool.md) tool package reference documentation for further information.
Tools used in this flow:
- `LLM` tool
- `conversational_language_understanding` tool from the `promptflow-azure-ai-language` package
Connections used in this flow:
- `Custom` connection
## Prerequisites
Install promptflow sdk and other dependencies:
```
pip install -r requirements.txt
```
## Setup connection
Prepare your [Azure AI Language Resource](https://azure.microsoft.com/en-us/products/ai-services/ai-language) first, and [create a Language Resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics) if necessary. Import the accompanying MediaPlayer.json into a CLU app, train the app and deploy. From your Language Resource, obtain its `api_key` and `endpoint`.
Create a connection to your Language Resource. The connection uses the `CustomConnection` schema:
```
# Override keys with --set to avoid yaml file changes
pf connection create -f ../connections/azure_ai_language.yml --set secrets.api_key=<your_api_key> configs.endpoint=<your_endpoint> name=azure_ai_language_connection
```
Ensure you have created the `azure_ai_language_connection`:
```
pf connection show -n azure_ai_language_connection
```
## Run flow
```
# Test with default input values in flow.dag.yaml:
pf flow test --flow .
```
### Flow description
The flow uses a `llm` node to break down compound user queries into simple user queries. For example, "Play some blues rock and turn up the volume" will be broken down into `["Play some blues rock", "Turn up the volume"]`.
This is then passed into the CLU tool to recognize intents and entities in each of the utterances.
### Contact
Please reach out to Abhishek Sen (<[email protected]>) or <[email protected]> with any issues. | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/multi_intent_conversational_language_understanding/MediaPlayer.json | {
"projectFileVersion": "2022-10-01-preview",
"stringIndexType": "Utf16CodeUnit",
"metadata": {
"projectKind": "Conversation",
"settings": {
"confidenceThreshold": 0,
"normalizeCasing": false
},
"projectName": "MediaPlayer",
"multilingual": false,
"description": "",
"language": "en-us"
},
"assets": {
"projectKind": "Conversation",
"intents": [
{
"category": "None"
},
{
"category": "PlayMedia"
},
{
"category": "UpdateVolume"
}
],
"entities": [],
"utterances": [
{
"text": "Put the volume at maximum.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Mute the audio.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Decrease the sound.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Can you adjust the volume to a comfortable level?",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Volume up to 80%.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Set the volume to 50%.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Make it quieter.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Lower the volume.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Turn up the sound.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Increase the volume.",
"language": "en-us",
"intent": "UpdateVolume",
"entities": [],
"dataset": "Train"
},
{
"text": "Play the next episode of my podcast.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Play a random podcast.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Start playing the song \"Shape of You\" by Ed Sheeran.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Play the latest album by Guns n Roses.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Play some classical music.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Can you play a relaxing playlist?",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Shuffle my playlist.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Play track number 5.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Play my favorite song.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
},
{
"text": "Play Eric Clapton.",
"language": "en-us",
"intent": "PlayMedia",
"entities": [],
"dataset": "Train"
}
]
}
} | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/multi_intent_conversational_language_understanding/chat.jinja2 | system:
Your task is to break down compound sentences into separate sentences.
For simple sentences just repeat the user input.
Remember to use a json array for the output.
user:
The output must be a json array.
Here are a few examples:
user input: Play Eric Clapton and turn down the volume.
OUTPUT: ["Play Eric Clapton.","Turn down the volume."]
user input: Play some Pink Floyd
OUTPUT: ["Play some Pink Floyd."]
user input: Change the radio station and turn on the seat heating.
OUTPUT: ["Change the radio station.","Turn on the seat heating."]
Process the given user input :
user input: {{question}}
OUTPUT: | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/multi_intent_conversational_language_understanding/requirements.txt | promptflow
promptflow-tools
promptflow-azure-ai-language | 0 |
promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language | promptflow_repo/promptflow/examples/flows/integrations/azure-ai-language/multi_intent_conversational_language_understanding/flow.dag.yaml | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
python_requirements_txt: requirements.txt
inputs:
chat_history:
type: list
is_chat_history: true
utterance:
type: string
is_chat_input: true
default: Play BB King and increase the volume.
outputs:
intents:
type: string
reference: ${Conversational_Language_Understanding.output}
is_chat_output: true
nodes:
- name: LLM_Rewrite
type: llm
source:
type: code
path: chat.jinja2
inputs:
deployment_name: cluGPTTurbo
max_tokens: 256
temperature: 0.7
question: ${inputs.utterance}
connection: CLUGPTModel
api: chat
- name: Conversational_Language_Understanding
type: python
source:
type: package
tool: language_tools.tools.conversational_language_understanding.get_conversational_language_understanding
inputs:
connection: azure_ai_language_connection
language: en-us
utterances: ${LLM_Rewrite.output}
project_name: MediaPlayer
deployment_name: adv
parse_response: false
| 0 |
promptflow_repo/promptflow/examples/flows/standard | promptflow_repo/promptflow/examples/flows/standard/describe-image/data.jsonl | {"question": "How many colors are there in the image?", "input_image": {"data:image/png;url": "https://developer.microsoft.com/_devcom/images/logo-ms-social.png"}}
{"question": "What's this image about?", "input_image": {"data:image/png;url": "https://developer.microsoft.com/_devcom/images/404.png"}} | 0 |