promptflow_repo/promptflow/examples/flows/chat
promptflow_repo/promptflow/examples/flows/chat/chat-with-wikipedia/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  chat_history:
    type: list
    default: []
  question:
    type: string
    default: What is ChatGPT?
    is_chat_input: true
outputs:
  answer:
    type: string
    reference: ${augmented_chat.output}
    is_chat_output: true
nodes:
- name: extract_query_from_question
  type: llm
  source:
    type: code
    path: extract_query_from_question.jinja2
  inputs:
    # This is to easily switch between openai and azure openai.
    # deployment_name is required by azure openai, model is required by openai.
    deployment_name: gpt-35-turbo
    model: gpt-3.5-turbo
    temperature: '0.7'
    top_p: '1.0'
    stop: ''
    max_tokens: '256'
    presence_penalty: '0'
    frequency_penalty: '0'
    logit_bias: ''
    question: ${inputs.question}
    chat_history: ${inputs.chat_history}
  connection: open_ai_connection
  api: chat
- name: get_wiki_url
  type: python
  source:
    type: code
    path: get_wiki_url.py
  inputs:
    entity: ${extract_query_from_question.output}
    count: '2'
- name: search_result_from_url
  type: python
  source:
    type: code
    path: search_result_from_url.py
  inputs:
    url_list: ${get_wiki_url.output}
    count: '10'
- name: process_search_result
  type: python
  source:
    type: code
    path: process_search_result.py
  inputs:
    search_result: ${search_result_from_url.output}
- name: augmented_chat
  type: llm
  source:
    type: code
    path: augmented_chat.jinja2
  inputs:
    # This is to easily switch between openai and azure openai.
    # deployment_name is required by azure openai, model is required by openai.
    deployment_name: gpt-35-turbo
    model: gpt-3.5-turbo
    temperature: '0.8'
    question: ${inputs.question}
    chat_history: ${inputs.chat_history}
    contexts: ${process_search_result.output}
  connection: open_ai_connection
  api: chat
environment:
  python_requirements_txt: requirements.txt
promptflow_repo/promptflow/examples/flows/chat
promptflow_repo/promptflow/examples/flows/chat/chat-with-wikipedia/search_result_from_url.py
import random
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial

import bs4
import requests

from promptflow import tool

session = requests.Session()


def decode_str(string):
    return string.encode().decode("unicode-escape").encode("latin1").decode("utf-8")


def get_page_sentence(page, count: int = 10):
    # find all paragraphs
    paragraphs = page.split("\n")
    paragraphs = [p.strip() for p in paragraphs if p.strip()]
    # find all sentences
    sentences = []
    for p in paragraphs:
        sentences += p.split(". ")
    sentences = [s.strip() + "." for s in sentences if s.strip()]
    # get first `count` number of sentences
    return " ".join(sentences[:count])


def fetch_text_content_from_url(url: str, count: int = 10):
    # Send a request to the URL
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        delay = random.uniform(0, 0.5)
        time.sleep(delay)
        response = session.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            page_content = [p_ul.get_text().strip() for p_ul in soup.find_all("p") + soup.find_all("ul")]
            page = ""
            for content in page_content:
                if len(content.split(" ")) > 2:
                    page += decode_str(content)
                    if not content.endswith("\n"):
                        page += "\n"
            text = get_page_sentence(page, count=count)
            return (url, text)
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
            return (url, "No available content")
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return (url, "No available content")


@tool
def search_result_from_url(url_list: list, count: int = 10):
    results = []
    partial_func_of_fetch_text_content_from_url = partial(fetch_text_content_from_url, count=count)
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = executor.map(partial_func_of_fetch_text_content_from_url, url_list)
        for future in futures:
            results.append(future)
    return results
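For a quick local check, the tool function can be called directly outside of a flow run. The sketch below assumes `search_result_from_url.py` is on the import path and that `promptflow`, `requests`, and `beautifulsoup4` are installed; the Wikipedia URLs are only illustrative.

```python
# Minimal sketch: fetch the first few sentences from two illustrative Wikipedia pages.
from search_result_from_url import search_result_from_url

urls = [
    "https://en.wikipedia.org/wiki/ChatGPT",
    "https://en.wikipedia.org/wiki/Large_language_model",
]

# Each element is a (url, text) tuple; failed fetches return "No available content".
for url, text in search_result_from_url(urls, count=5):
    print(url)
    print(text[:200], "...")
```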
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/parse_score.py
from promptflow import tool
import re


@tool
def parse_score(gpt_score: str):
    return float(extract_float(gpt_score))


def extract_float(s):
    match = re.search(r"[-+]?\d*\.\d+|\d+", s)
    if match:
        return float(match.group())
    else:
        return None
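A quick way to see what this parser accepts is to call it directly. This is only a sketch and assumes `promptflow` is installed so the `@tool` decorator resolves; the sample strings are made up.

```python
# Minimal sketch of parse_score behavior on typical LLM replies.
from parse_score import parse_score

print(parse_score("7"))           # 7.0
print(parse_score("Score: 8.5"))  # 8.5 (first number found in the string)

# Note: a reply with no digits makes extract_float return None, and
# float(None) then raises a TypeError, which is why the prompt asks the
# model to respond with the score and nothing else.
```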
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/data.jsonl
{"question": "What is the name of the new language representation model introduced in the document?", "variant_id": "v1", "line_number":1, "answer":"The document mentions multiple language representation models, so it is unclear which one is being referred to as \"new\". Can you provide more specific information or context?","context":["statistical language modeling. arXiv preprint arXiv:1312.3005 . Z. Chen, H. Zhang, X. Zhang, and L. Zhao. 2018. Quora question pairs. Christopher Clark and Matt Gardner. 2018. Simple and effective multi-paragraph reading comprehen- sion. In ACL.Kevin Clark, Minh-Thang Luong, Christopher D Man- ning, and Quoc Le. 2018. Semi-supervised se- quence modeling with cross-view training. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods in Natural Language Processing , pages 1914\u2013 1925. Ronan Collobert and Jason Weston. 2008. A uni\ufb01ed architecture for natural language processing: Deep neural networks with multitask learning. In Pro- ceedings of the 25th international conference on Machine learning , pages 160\u2013167. ACM. Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo \u00a8\u0131c Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing , pages 670\u2013680, Copen- hagen, Denmark. Association for Computational Linguistics. Andrew M Dai and Quoc V Le. 2015. Semi-supervised sequence learning. In Advances in neural informa- tion processing systems , pages 3079\u20133087. J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei- Fei. 2009. ImageNet: A Large-Scale Hierarchical Image Database. In CVPR09 . William B Dolan and Chris Brockett. 2005. Automati- cally constructing a corpus of sentential paraphrases. InProceedings of the Third International Workshop on Paraphrasing (IWP2005) . William Fedus, Ian Goodfellow, and Andrew M Dai. 2018. Maskgan: Better text generation via \ufb01lling in the.arXiv preprint arXiv:1801.07736 . Dan Hendrycks and Kevin Gimpel. 2016. Bridging nonlinearities and stochastic regularizers with gaus- sian error linear units. CoRR , abs\/1606.08415. Felix Hill, Kyunghyun Cho, and Anna Korhonen. 2016. Learning distributed representations of sentences from unlabelled data. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies . Association for Computa- tional Linguistics. Jeremy Howard and Sebastian Ruder. 2018. Universal language model \ufb01ne-tuning for text classi\ufb01cation. In ACL. Association for Computational Linguistics. Minghao Hu, Yuxing Peng, Zhen Huang, Xipeng Qiu, Furu Wei, and Ming Zhou. 2018. Reinforced mnemonic reader for machine reading comprehen- sion. In IJCAI . Yacine Jernite, Samuel R. Bowman, and David Son- tag. 2017. Discourse-based objectives for fast un- supervised sentence representation learning. CoRR , abs\/1705.00557.Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. 2017. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehen- sion. In ACL. Ryan Kiros, Yukun Zhu, Ruslan R Salakhutdinov, Richard Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-thought vectors. In Advances in neural information processing systems , pages 3294\u20133302. Quoc Le and Tomas Mikolov. 2014. Distributed rep- resentations of sentences and documents. 
In Inter- national Conference on Machine Learning , pages 1188\u20131196. Hector J Levesque, Ernest Davis, and Leora Morgen- stern. 2011. The winograd schema challenge. In Aaai spring symposium: Logical formalizations of commonsense reasoning , volume 46, page 47. Lajanugen Logeswaran and Honglak Lee. 2018. An ef\ufb01cient framework for learning sentence represen- tations. In International Conference on Learning Representations . Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. 2017. Learned in translation: Con-","tool for measuring readability. Journalism Bulletin , 30(4):415\u2013433. Erik F Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: Language-independent named entity recognition. In CoNLL . Joseph Turian, Lev Ratinov, and Yoshua Bengio. 2010. Word representations: A simple and general method for semi-supervised learning. In Proceedings of the 48th Annual Meeting of the Association for Compu- tational Linguistics , ACL \u201910, pages 384\u2013394. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems , pages 6000\u20136010. Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre-Antoine Manzagol. 2008. Extracting and composing robust features with denoising autoen- coders. In Proceedings of the 25th international conference on Machine learning , pages 1096\u20131103. ACM. Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018a. Glue: A multi-task benchmark and analysis platformfor natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: An- alyzing and Interpreting Neural Networks for NLP , pages 353\u2013355. Wei Wang, Ming Yan, and Chen Wu. 2018b. Multi- granularity hierarchical attention fusion networks for reading comprehension and question answering. InProceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers) . Association for Computational Lin- guistics. Alex Warstadt, Amanpreet Singh, and Samuel R Bow- man. 2018. Neural network acceptability judg- ments. arXiv preprint arXiv:1805.12471 . Adina Williams, Nikita Nangia, and Samuel R Bow- man. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In NAACL . Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google\u2019s neural ma- chine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144 . Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. 2014. How transferable are features in deep neural networks? In Advances in neural information processing systems , pages 3320\u20133328. Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui Zhao, Kai Chen, Mohammad Norouzi, and Quoc V Le. 2018. QANet: Combining local convolution with global self-attention for reading comprehen- sion. In ICLR . Rowan Zellers, Yonatan Bisk, Roy Schwartz, and Yejin Choi. 2018. Swag: A large-scale adversarial dataset for grounded commonsense inference. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP) . Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhut- dinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. 
Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision , pages 19\u201327. Appendix for \u201cBERT: Pre-training of Deep Bidirectional Transformers for Language Understanding\u201d We organize the appendix into three sections: \u2022 Additional implementation details for BERT are presented in Appendix A;\u2022 Additional details for our experiments are presented in Appendix B; and \u2022 Additional ablation studies are presented in Appendix C. We present additional ablation studies for BERT including: \u2013Effect of Number of Training Steps; and \u2013Ablation for Different"]} {"question": "What is the main difference between BERT and previous language representation models?", "variant_id": "v1", "line_number": 2, "answer":"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers, allowing it to incorporate context from both directions. This is unlike previous language representation models that are unidirectional, which limits the choice of architectures that can be used during pre-training and could be sub-optimal for sentence-level tasks and token-level tasks such as question answering.","context":["BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding Jacob Devlin Ming-Wei Chang Kenton Lee Kristina Toutanova Google AI Language fjacobdevlin,mingweichang,kentonl,kristout [email protected] Abstract We introduce a new language representa- tion model called BERT , which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language repre- sentation models (Peters et al., 2018a; Rad- ford et al., 2018), BERT is designed to pre- train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a re- sult, the pre-trained BERT model can be \ufb01ne- tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task- speci\ufb01c architecture modi\ufb01cations. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art re- sults on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answer- ing Test F1 to 93.2 (1.5 point absolute im- provement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement). 1 Introduction Language model pre-training has been shown to be effective for improving many natural language processing tasks (Dai and Le, 2015; Peters et al., 2018a; Radford et al., 2018; Howard and Ruder, 2018). These include sentence-level tasks such as natural language inference (Bowman et al., 2015; Williams et al., 2018) and paraphrasing (Dolan and Brockett, 2005), which aim to predict the re- lationships between sentences by analyzing them holistically, as well as token-level tasks such as named entity recognition and question answering, where models are required to produce \ufb01ne-grained output at the token level (Tjong Kim Sang and De Meulder, 2003; Rajpurkar et al., 2016).There are two existing strategies for apply- ing pre-trained language representations to down- stream tasks: feature-based and\ufb01ne-tuning . 
The feature-based approach, such as ELMo (Peters et al., 2018a), uses task-speci\ufb01c architectures that include the pre-trained representations as addi- tional features. The \ufb01ne-tuning approach, such as the Generative Pre-trained Transformer (OpenAI GPT) (Radford et al., 2018), introduces minimal task-speci\ufb01c parameters, and is trained on the downstream tasks by simply \ufb01ne-tuning allpre- trained parameters. The two approaches share the same objective function during pre-training, where they use unidirectional language models to learn general language representations. We argue that current techniques restrict the power of the pre-trained representations, espe- cially for the \ufb01ne-tuning approaches. The ma- jor limitation is that standard language models are unidirectional, and this limits the choice of archi- tectures that can be used during pre-training. For example, in OpenAI GPT, the authors use a left-to- right architecture, where every token can only at- tend to previous tokens in the self-attention layers of the Transformer (Vaswani et al., 2017). Such re- strictions are sub-optimal for sentence-level tasks, and could be very harmful when applying \ufb01ne- tuning based approaches to token-level tasks such as question answering, where it is crucial to incor- porate context from both directions. In this paper, we improve the \ufb01ne-tuning based approaches by proposing BERT: Bidirectional Encoder Representations from Transformers.","the self-attention layers of the Transformer (Vaswani et al., 2017). Such re- strictions are sub-optimal for sentence-level tasks, and could be very harmful when applying \ufb01ne- tuning based approaches to token-level tasks such as question answering, where it is crucial to incor- porate context from both directions. In this paper, we improve the \ufb01ne-tuning based approaches by proposing BERT: Bidirectional Encoder Representations from Transformers. BERT alleviates the previously mentioned unidi- rectionality constraint by using a \u201cmasked lan- guage model\u201d (MLM) pre-training objective, in- spired by the Cloze task (Taylor, 1953). The masked language model randomly masks some of the tokens from the input, and the objective is to predict the original vocabulary id of the maskedarXiv:1810.04805v2 [cs.CL] 24 May 2019word based only on its context. Unlike left-to- right language model pre-training, the MLM ob- jective enables the representation to fuse the left and the right context, which allows us to pre- train a deep bidirectional Transformer. In addi- tion to the masked language model, we also use a \u201cnext sentence prediction\u201d task that jointly pre- trains text-pair representations. The contributions of our paper are as follows: \u2022 We demonstrate the importance of bidirectional pre-training for language representations. Un- like Radford et al. (2018), which uses unidirec- tional language models for pre-training, BERT uses masked language models to enable pre- trained deep bidirectional representations. This is also in contrast to Peters et al. (2018a), which uses a shallow concatenation of independently trained left-to-right and right-to-left LMs. \u2022 We show that pre-trained representations reduce the need for many heavily-engineered task- speci\ufb01c architectures. BERT is the \ufb01rst \ufb01ne- tuning based representation model that achieves state-of-the-art performance on a large suite of sentence-level andtoken-level tasks, outper- forming many task-speci\ufb01c architectures. 
\u2022 BERT advances the state of the art for eleven NLP tasks. The code and pre-trained mod- els are available at https:\/\/github.com\/ google-research\/bert . 2 Related Work There is a long history of pre-training general lan- guage representations, and we brie\ufb02y review the most widely-used approaches in this section. 2.1 Unsupervised Feature-based Approaches Learning widely applicable representations of words has been an active area of research for decades, including non-neural (Brown et al., 1992; Ando and Zhang, 2005; Blitzer et al., 2006) and neural (Mikolov et al., 2013; Pennington et al., 2014) methods. Pre-trained word embeddings are an integral part of modern NLP systems, of- fering signi\ufb01cant improvements over embeddings learned from scratch (Turian et al., 2010). To pre- train word embedding vectors, left-to-right lan- guage modeling objectives have been used (Mnih and Hinton, 2009), as well as objectives to dis- criminate correct from incorrect words in left and right context (Mikolov et al., 2013).These approaches have been generalized to coarser granularities, such as sentence embed- dings (Kiros et al., 2015; Logeswaran and Lee, 2018) or paragraph embeddings (Le and Mikolov, 2014). To train sentence representations, prior work has used objectives to rank candidate next sentences (Jernite et al., 2017; Logeswaran and Lee, 2018), left-to-right generation of next sen- tence words given a representation of the previous sentence (Kiros et al., 2015), or denoising auto- encoder","the token-level classi\ufb01er over the NER label set. To ablate the \ufb01ne-tuning approach, we apply the feature-based approach by extracting the activa- tions from one or more layers without \ufb01ne-tuning any parameters of BERT. These contextual em- beddings are used as input to a randomly initial- ized two-layer 768-dimensional BiLSTM before the classi\ufb01cation layer. Results are presented in Table 7. BERT LARGE performs competitively with state-of-the-art meth- ods. The best performing method concatenates the token representations from the top four hidden lay- ers of the pre-trained Transformer, which is only 0.3 F1 behind \ufb01ne-tuning the entire model. This demonstrates that BERT is effective for both \ufb01ne- tuning and feature-based approaches. 6 Conclusion Recent empirical improvements due to transfer learning with language models have demonstrated that rich, unsupervised pre-training is an integral part of many language understanding systems. In particular, these results enable even low-resource tasks to bene\ufb01t from deep unidirectional architec- tures. Our major contribution is further general- izing these \ufb01ndings to deep bidirectional architec- tures, allowing the same pre-trained model to suc- cessfully tackle a broad set of NLP tasks.References Alan Akbik, Duncan Blythe, and Roland V ollgraf. 2018. Contextual string embeddings for sequence labeling. In Proceedings of the 27th International Conference on Computational Linguistics , pages 1638\u20131649. Rami Al-Rfou, Dokook Choe, Noah Constant, Mandy Guo, and Llion Jones. 2018. Character-level lan- guage modeling with deeper self-attention. arXiv preprint arXiv:1808.04444 . Rie Kubota Ando and Tong Zhang. 2005. A framework for learning predictive structures from multiple tasks and unlabeled data. Journal of Machine Learning Research , 6(Nov):1817\u20131853. Luisa Bentivogli, Bernardo Magnini, Ido Dagan, Hoa Trang Dang, and Danilo Giampiccolo. 2009. The \ufb01fth PASCAL recognizing textual entailment challenge. 
In TAC. NIST. John Blitzer, Ryan McDonald, and Fernando Pereira. 2006. Domain adaptation with structural correspon- dence learning. In Proceedings of the 2006 confer- ence on empirical methods in natural language pro- cessing , pages 120\u2013128. Association for Computa- tional Linguistics. Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. InEMNLP . Association for Computational Linguis- tics. Peter F Brown, Peter V Desouza, Robert L Mercer, Vincent J Della Pietra, and Jenifer C Lai. 1992. Class-based n-gram models of natural language. Computational linguistics , 18(4):467\u2013479. Daniel Cer, Mona Diab, Eneko Agirre, Inigo Lopez- Gazpio, and Lucia Specia. 2017. Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017) , pages 1\u201314, Vancou- ver, Canada. Association for Computational Lin- guistics. Ciprian Chelba, Tomas Mikolov, Mike Schuster, Qi Ge, Thorsten Brants, Phillipp Koehn, and Tony Robin- son. 2013. One billion word benchmark for measur- ing progress in statistical language modeling. arXiv preprint arXiv:1312.3005 . Z. Chen, H. Zhang, X. Zhang, and L. Zhao. 2018. Quora question pairs. Christopher Clark and Matt Gardner. 2018. Simple and effective multi-paragraph reading comprehen- sion. In ACL.Kevin Clark, Minh-Thang Luong, Christopher D Man- ning, and Quoc Le. 2018. Semi-supervised se- quence modeling with cross-view training. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods"]}
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/gpt_perceived_intelligence.md
user:
# Instructions
* There are many chatbots that can answer users' questions based on context drawn from different sources, such as search results or snippets from books/papers. They try to understand the user's question, retrieve relevant context from search engines, databases, or books/papers, and then answer the question based on their understanding of the question and the context.
* Perceived intelligence is the degree to which a bot can impress the user with its responses, by showing originality, insight, creativity, knowledge, and adaptability. Perceived intelligence can be influenced by various factors, such as the content, tone, style, and structure of the bot's responses; the relevance, coherence, and accuracy of the information the bot provides; the creativity, originality, and wit of the bot's expressions; the depth, breadth, and insight of the bot's knowledge; and the ability of the bot to adapt, learn, and use feedback.
* Your goal is to score the answer for the given question and context from 1 to 10 based on perceived intelligence as described above:
  * Score 10 means the answer is excellent for perceived intelligence
  * Score 1 means the answer is poor for perceived intelligence
  * Score 5 means the answer is normal for perceived intelligence
* Just respond with the score, nothing else.

# Real work
## Question
{{question}}
## Answer
{{answer}}
## Context
{{context}}
## Score
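The `{{question}}`, `{{answer}}`, and `{{context}}` placeholders are filled in by the LLM node at runtime. As a rough illustration only (not the flow's actual rendering code), the same substitution can be reproduced with Jinja2; the sample values below are made up.

```python
# Illustrative only: render the prompt template with sample values using Jinja2.
from jinja2 import Template

with open("gpt_perceived_intelligence.md") as f:
    template = Template(f.read())

prompt = template.render(
    question="What is the name of the new language representation model?",
    answer="The paper introduces BERT.",
    context="BERT: Pre-training of Deep Bidirectional Transformers ...",
)
print(prompt)
```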
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/requirements.txt
promptflow
promptflow-tools
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/aggregate.py
from typing import List

from promptflow import tool


@tool
def aggregate(perceived_intelligence_score: List[float]):
    aggregated_results = {"perceived_intelligence_score": 0.0, "count": 0}

    # Calculate average perceived_intelligence_score
    for i in range(len(perceived_intelligence_score)):
        aggregated_results["perceived_intelligence_score"] += perceived_intelligence_score[i]
        aggregated_results["count"] += 1

    aggregated_results["perceived_intelligence_score"] /= aggregated_results["count"]

    # Log metric for each variant
    from promptflow import log_metric
    log_metric(key="perceived_intelligence_score", value=aggregated_results["perceived_intelligence_score"])

    return aggregated_results
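Called outside of a batch run, the aggregation reduces to a simple mean. A minimal sketch, assuming `promptflow` is installed; outside a flow run, `log_metric` has no run context to record to and may only warn.

```python
# Minimal sketch: aggregate three per-line scores into one averaged metric.
from aggregate import aggregate

print(aggregate([4.0, 5.0, 3.0]))
# Expected: {'perceived_intelligence_score': 4.0, 'count': 3}
```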
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/README.md
# Perceived Intelligence Evaluation

This flow leverages an LLM to evaluate perceived intelligence. Perceived intelligence is the degree to which a bot can impress the user with its responses, by showing originality, insight, creativity, knowledge, and adaptability.

Tools used in this flow:
- `python` tool
- built-in `llm` tool

### 0. Setup connection

Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.

```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>
```

### 1. Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
```

### 2. Create flow run with multi-line data

```bash
pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' answer='${data.answer}' context='${data.context}' --stream
```

You can skip `column-mapping` if the provided data has the same column names as the flow. See [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI.
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-perceived-intelligence/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json environment: python_requirements_txt: requirements.txt inputs: question: type: string default: What is the name of the new language representation model introduced in the document? answer: type: string default: The document mentions multiple language representation models, so it is unclear which one is being referred to as \"new\". Can you provide more specific information or context? context: type: string default: '["statistical language modeling. arXiv preprint arXiv:1312.3005 . Z. Chen, H. Zhang, X. Zhang, and L. Zhao. 2018. Quora question pairs. Christopher Clark and Matt Gardner. 2018. Simple and effective multi-paragraph reading comprehen- sion. In ACL.Kevin Clark, Minh-Thang Luong, Christopher D Man- ning, and Quoc Le. 2018. Semi-supervised se- quence modeling with cross-view training. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods in Natural Language Processing , pages 1914\u2013 1925. Ronan Collobert and Jason Weston. 2008. A uni\ufb01ed architecture for natural language processing: Deep neural networks with multitask learning. In Pro- ceedings of the 25th international conference on Machine learning , pages 160\u2013167. ACM. Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo \u00a8\u0131c Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing , pages 670\u2013680, Copen- hagen, Denmark. Association for Computational Linguistics. Andrew M Dai and Quoc V Le. 2015. Semi-supervised sequence learning. In Advances in neural informa- tion processing systems , pages 3079\u20133087. J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei- Fei. 2009. ImageNet: A Large-Scale Hierarchical Image Database. In CVPR09 . William B Dolan and Chris Brockett. 2005. Automati- cally constructing a corpus of sentential paraphrases. InProceedings of the Third International Workshop on Paraphrasing (IWP2005) . William Fedus, Ian Goodfellow, and Andrew M Dai. 2018. Maskgan: Better text generation via \ufb01lling in the.arXiv preprint arXiv:1801.07736 . Dan Hendrycks and Kevin Gimpel. 2016. Bridging nonlinearities and stochastic regularizers with gaussian error linear units. CoRR , abs\/1606.08415. Felix Hill, Kyunghyun Cho, and Anna Korhonen. 2016. Learning distributed representations of sentences from unlabelled data. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies . Association for Computa- tional Linguistics. Jeremy Howard and Sebastian Ruder. 2018. Universal language model \ufb01ne-tuning for text classi\ufb01cation. In ACL. Association for Computational Linguistics. Minghao Hu, Yuxing Peng, Zhen Huang, Xipeng Qiu, Furu Wei, and Ming Zhou. 2018. Reinforced mnemonic reader for machine reading comprehen- sion. In IJCAI . Yacine Jernite, Samuel R. Bowman, and David Son- tag. 2017. Discourse-based objectives for fast un- supervised sentence representation learning. CoRR , abs\/1705.00557.Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. 2017. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehen- sion. In ACL. Ryan Kiros, Yukun Zhu, Ruslan R Salakhutdinov, Richard Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-thought vectors. 
In Advances in neural information processing systems , pages 3294\u20133302. Quoc Le and Tomas Mikolov. 2014. Distributed rep- resentations of sentences and documents. In Inter- national Conference on Machine Learning , pages 1188\u20131196. Hector J Levesque, Ernest Davis, and Leora Morgen- stern. 2011. The winograd schema challenge. In Aaai spring symposium: Logical formalizations of commonsense reasoning , volume 46, page 47. Lajanugen Logeswaran and Honglak Lee. 2018. An ef\ufb01cient framework for learning sentence represen- tations. In International Conference on Learning Representations . Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. 2017. Learned in translation: Con-","tool for measuring readability. Journalism Bulletin , 30(4):415\u2013433. Erik F Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: Language-independent named entity recognition. In CoNLL . Joseph Turian, Lev Ratinov, and Yoshua Bengio. 2010. Word representations: A simple and general method for semi-supervised learning. In Proceedings of the 48th Annual Meeting of the Association for Compu- tational Linguistics , ACL \u201910, pages 384\u2013394. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems , pages 6000\u20136010. Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre-Antoine Manzagol. 2008. Extracting and composing robust features with denoising autoen- coders. In Proceedings of the 25th international conference on Machine learning , pages 1096\u20131103. ACM. Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018a. Glue: A multi-task benchmark and analysis platformfor natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: An- alyzing and Interpreting Neural Networks for NLP , pages 353\u2013355. Wei Wang, Ming Yan, and Chen Wu. 2018b. Multi- granularity hierarchical attention fusion networks for reading comprehension and question answering. InProceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers) . Association for Computational Lin- guistics. Alex Warstadt, Amanpreet Singh, and Samuel R Bow- man. 2018. Neural network acceptability judg- ments. arXiv preprint arXiv:1805.12471 . Adina Williams, Nikita Nangia, and Samuel R Bow- man. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In NAACL . Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google\u2019s neural ma- chine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144 . Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. 2014. How transferable are features in deep neural networks? In Advances in neural information processing systems , pages 3320\u20133328. Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui Zhao, Kai Chen, Mohammad Norouzi, and Quoc V Le. 2018. QANet: Combining local convolution with global self-attention for reading comprehen- sion. In ICLR . Rowan Zellers, Yonatan Bisk, Roy Schwartz, and Yejin Choi. 2018. Swag: A large-scale adversarial dataset for grounded commonsense inference. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP) . 
Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhut- dinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision , pages 19\u201327. Appendix for \u201cBERT: Pre-training of Deep Bidirectional Transformers for Language Understanding\u201d We organize the appendix into three sections: \u2022 Additional implementation details for BERT are presented in Appendix A;\u2022 Additional details for our experiments are presented in Appendix B; and \u2022 Additional ablation studies are presented in Appendix C. We present additional ablation studies for BERT including: \u2013Effect of Number of Training Steps; and \u2013Ablation for Different"]} {"question": "What is the main difference between BERT and previous language representation models?", "variant_id": "v1", "line_number": 2, answer":"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers, allowing it to incorporate context from both directions. This is unlike previous language representation models that are unidirectional, which limits the choice of architectures that can be used during pre-training and could be sub-optimal for sentence-level tasks and token-level tasks such as question answering.","context":["BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding Jacob Devlin Ming-Wei Chang Kenton Lee Kristina Toutanova Google AI Language fjacobdevlin,mingweichang,kentonl,kristout [email protected] Abstract We introduce a new language representa- tion model called BERT , which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language repre- sentation models (Peters et al., 2018a; Rad- ford et al., 2018), BERT is designed to pre- train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a re- sult, the pre-trained BERT model can be \ufb01ne- tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task- speci\ufb01c architecture modi\ufb01cations. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art re- sults on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answer- ing Test F1 to 93.2 (1.5 point absolute im- provement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement). 1 Introduction Language model pre-training has been shown to be effective for improving many natural language processing tasks (Dai and Le, 2015; Peters et al., 2018a; Radford et al., 2018; Howard and Ruder, 2018). 
These include sentence-level tasks such as natural language inference (Bowman et al., 2015; Williams et al., 2018) and paraphrasing (Dolan and Brockett, 2005), which aim to predict the re- lationships between sentences by analyzing them holistically, as well as token-level tasks such as named entity recognition and question answering, where models are required to produce \ufb01ne-grained output at the token level (Tjong Kim Sang and De Meulder, 2003; Rajpurkar et al., 2016).There are two existing strategies for apply- ing pre-trained language representations to down- stream tasks: feature-based and\ufb01ne-tuning . The feature-based approach, such as ELMo (Peters et al., 2018a), uses task-speci\ufb01c architectures that include the pre-trained representations as addi- tional features. The \ufb01ne-tuning approach, such as the Generative Pre-trained Transformer (OpenAI GPT) (Radford et al., 2018), introduces minimal task-speci\ufb01c parameters, and is trained on the downstream tasks by simply \ufb01ne-tuning allpre- trained parameters. The two approaches share the same objective function during pre-training, where they use unidirectional language models to learn general language representations. We argue that current techniques restrict the power of the pre-trained representations, espe- cially for the \ufb01ne-tuning approaches. The ma- jor limitation is that standard language models are unidirectional, and this limits the choice of archi- tectures that can be used during pre-training. For example, in OpenAI GPT, the authors use a left-to- right architecture, where every token can only at- tend to previous tokens in the self-attention layers of the Transformer (Vaswani et al., 2017). Such re- strictions are sub-optimal for sentence-level tasks, and could be very harmful when applying \ufb01ne- tuning based approaches to token-level tasks such as question answering, where it is crucial to incor- porate context from both directions. In this paper, we improve the \ufb01ne-tuning based approaches by proposing BERT: Bidirectional Encoder Representations from Transformers.","the self-attention layers of the Transformer (Vaswani et al., 2017). Such re- strictions are sub-optimal for sentence-level tasks, and could be very harmful when applying \ufb01ne- tuning based approaches to token-level tasks such as question answering, where it is crucial to incor- porate context from both directions. In this paper, we improve the \ufb01ne-tuning based approaches by proposing BERT: Bidirectional Encoder Representations from Transformers. BERT alleviates the previously mentioned unidi- rectionality constraint by using a \u201cmasked lan- guage model\u201d (MLM) pre-training objective, in- spired by the Cloze task (Taylor, 1953). The masked language model randomly masks some of the tokens from the input, and the objective is to predict the original vocabulary id of the maskedarXiv:1810.04805v2 [cs.CL] 24 May 2019word based only on its context. Unlike left-to- right language model pre-training, the MLM ob- jective enables the representation to fuse the left and the right context, which allows us to pre- train a deep bidirectional Transformer. In addi- tion to the masked language model, we also use a \u201cnext sentence prediction\u201d task that jointly pre- trains text-pair representations. The contributions of our paper are as follows: \u2022 We demonstrate the importance of bidirectional pre-training for language representations. Un- like Radford et al. 
(2018), which uses unidirec- tional language models for pre-training, BERT uses masked language models to enable pre- trained deep bidirectional representations. This is also in contrast to Peters et al. (2018a), which uses a shallow concatenation of independently trained left-to-right and right-to-left LMs. \u2022 We show that pre-trained representations reduce the need for many heavily-engineered task- speci\ufb01c architectures. BERT is the \ufb01rst \ufb01ne- tuning based representation model that achieves state-of-the-art performance on a large suite of sentence-level andtoken-level tasks, outper- forming many task-speci\ufb01c architectures. \u2022 BERT advances the state of the art for eleven NLP tasks. The code and pre-trained mod- els are available at https:\/\/github.com\/ google-research\/bert . 2 Related Work There is a long history of pre-training general lan- guage representations, and we brie\ufb02y review the most widely-used approaches in this section. 2.1 Unsupervised Feature-based Approaches Learning widely applicable representations of words has been an active area of research for decades, including non-neural (Brown et al., 1992; Ando and Zhang, 2005; Blitzer et al., 2006) and neural (Mikolov et al., 2013; Pennington et al., 2014) methods. Pre-trained word embeddings are an integral part of modern NLP systems, of- fering signi\ufb01cant improvements over embeddings learned from scratch (Turian et al., 2010). To pre- train word embedding vectors, left-to-right lan- guage modeling objectives have been used (Mnih and Hinton, 2009), as well as objectives to dis- criminate correct from incorrect words in left and right context (Mikolov et al., 2013).These approaches have been generalized to coarser granularities, such as sentence embed- dings (Kiros et al., 2015; Logeswaran and Lee, 2018). "]' outputs: perceived_intelligence_score: type: string reference: ${parse_score.output} nodes: - name: parse_score type: python source: type: code path: parse_score.py inputs: gpt_score: ${gpt_perceived_intelligence.output} - name: aggregate type: python source: type: code path: aggregate.py inputs: perceived_intelligence_score: ${parse_score.output} aggregation: true - name: gpt_perceived_intelligence type: llm source: type: code path: gpt_perceived_intelligence.md inputs: # This is to easily switch between openai and azure openai. # deployment_name is required by azure openai, model is required by openai. deployment_name: gpt-4 model: gpt-4 max_tokens: 5 answer: ${inputs.answer} question: ${inputs.question} context: ${inputs.context} temperature: 0 connection: open_ai_connection api: chat
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_fluency_prompt.jinja2
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.

user:
Fluency measures the quality of individual sentences in the answer, and whether they are well-written and grammatically correct. Consider the quality of individual sentences when evaluating fluency. Given the question and answer, score the fluency of the answer between one to five stars using the following rating scale:
One star: the answer completely lacks fluency
Two stars: the answer mostly lacks fluency
Three stars: the answer is partially fluent
Four stars: the answer is mostly fluent
Five stars: the answer has perfect fluency

This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.

question: What did you have for breakfast today?
answer: Breakfast today, me eating cereal and orange juice very good.
stars: 1

question: How do you feel when you travel alone?
answer: Alone travel, nervous, but excited also. I feel adventure and like its time.
stars: 2

question: When was the last time you went on a family vacation?
answer: Last family vacation, it took place in last summer. We traveled to a beach destination, very fun.
stars: 3

question: What is your favorite thing about your job?
answer: My favorite aspect of my job is the chance to interact with diverse people. I am constantly learning from their experiences and stories.
stars: 4

question: Can you describe your morning routine?
answer: Every morning, I wake up at 6 am, drink a glass of water, and do some light stretching. After that, I take a shower and get dressed for work. Then, I have a healthy breakfast, usually consisting of oatmeal and fruits, before leaving the house around 7:30 am.
stars: 5

question: {{question}}
answer: {{answer}}
stars:
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/data.jsonl
{"question":"Which tent is the most waterproof?","ground_truth":"The Alpine Explorer Tent has the highest rainfly waterproof rating at 3000m","answer":"The Alpine Explorer Tent is the most waterproof.","context":"From the our product list, the alpine explorer tent is the most waterproof. The Adventure Dining Table has higher weight."} {"question":"Which camping table holds the most weight?","ground_truth":"The Adventure Dining Table has a higher weight capacity than all of the other camping tables mentioned","answer":"The Alpine Explorer Tent is the most waterproof.","context":"From the our product list, the alpine explorer tent is the most waterproof. The Adventure Dining Table has higher weight."}
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_similarity_prompt.jinja2
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.

user:
Equivalence, as a metric, measures the similarity between the predicted answer and the correct answer. If the information and content in the predicted answer is similar or equivalent to the correct answer, then the value of the Equivalence metric should be high, else it should be low. Given the question, correct answer, and predicted answer, determine the value of Equivalence metric using the following rating scale:
One star: the predicted answer is not at all similar to the correct answer
Two stars: the predicted answer is mostly not similar to the correct answer
Three stars: the predicted answer is somewhat similar to the correct answer
Four stars: the predicted answer is mostly similar to the correct answer
Five stars: the predicted answer is completely similar to the correct answer

This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.

The examples below show the Equivalence score for a question, a correct answer, and a predicted answer.

question: What is the role of ribosomes?
correct answer: Ribosomes are cellular structures responsible for protein synthesis. They interpret the genetic information carried by messenger RNA (mRNA) and use it to assemble amino acids into proteins.
predicted answer: Ribosomes participate in carbohydrate breakdown by removing nutrients from complex sugar molecules.
stars: 1

question: Why did the Titanic sink?
correct answer: The Titanic sank after it struck an iceberg during its maiden voyage in 1912. The impact caused the ship's hull to breach, allowing water to flood into the vessel. The ship's design, lifeboat shortage, and lack of timely rescue efforts contributed to the tragic loss of life.
predicted answer: The sinking of the Titanic was a result of a large iceberg collision. This caused the ship to take on water and eventually sink, leading to the death of many passengers due to a shortage of lifeboats and insufficient rescue attempts.
stars: 2

question: What causes seasons on Earth?
correct answer: Seasons on Earth are caused by the tilt of the Earth's axis and its revolution around the Sun. As the Earth orbits the Sun, the tilt causes different parts of the planet to receive varying amounts of sunlight, resulting in changes in temperature and weather patterns.
predicted answer: Seasons occur because of the Earth's rotation and its elliptical orbit around the Sun. The tilt of the Earth's axis causes regions to be subjected to different sunlight intensities, which leads to temperature fluctuations and alternating weather conditions.
stars: 3

question: How does photosynthesis work?
correct answer: Photosynthesis is a process by which green plants and some other organisms convert light energy into chemical energy. This occurs as light is absorbed by chlorophyll molecules, and then carbon dioxide and water are converted into glucose and oxygen through a series of reactions.
predicted answer: In photosynthesis, sunlight is transformed into nutrients by plants and certain microorganisms. Light is captured by chlorophyll molecules, followed by the conversion of carbon dioxide and water into sugar and oxygen through multiple reactions.
stars: 4

question: What are the health benefits of regular exercise?
correct answer: Regular exercise can help maintain a healthy weight, increase muscle and bone strength, and reduce the risk of chronic diseases. It also promotes mental well-being by reducing stress and improving overall mood.
predicted answer: Routine physical activity can contribute to maintaining ideal body weight, enhancing muscle and bone strength, and preventing chronic illnesses. In addition, it supports mental health by alleviating stress and augmenting general mood.
stars: 5

question: {{question}}
correct answer: {{ground_truth}}
predicted answer: {{answer}}
stars:
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/requirements.txt
promptflow
promptflow-tools
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/ada_cosine_similarity_score.py
from promptflow import tool
import numpy as np
from numpy.linalg import norm


@tool
def compute_ada_cosine_similarity(a, b) -> float:
    return np.dot(a, b) / (norm(a) * norm(b))
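The two inputs are the ada embedding vectors of the prediction and the ground truth, produced by the flow's `Embedding` nodes. A minimal sketch with made-up low-dimensional vectors:

```python
# Minimal sketch: cosine similarity of two short, made-up embedding vectors.
import numpy as np
from ada_cosine_similarity_score import compute_ada_cosine_similarity

a = np.array([0.10, 0.20, 0.30])
b = np.array([0.11, 0.19, 0.29])
print(compute_ada_cosine_similarity(a, b))  # close to 1.0 for near-identical vectors
```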
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/validate_input.py
from promptflow import tool


@tool
def validate_input(question: str, answer: str, context: str, ground_truth: str, selected_metrics: dict) -> dict:
    input_data = {"question": question, "answer": answer, "context": context, "ground_truth": ground_truth}
    expected_input_cols = set(input_data.keys())
    dict_metric_required_fields = {
        "gpt_groundedness": set(["answer", "context"]),
        "gpt_relevance": set(["question", "answer", "context"]),
        "gpt_coherence": set(["question", "answer"]),
        "gpt_similarity": set(["question", "answer", "ground_truth"]),
        "gpt_fluency": set(["question", "answer"]),
        "f1_score": set(["answer", "ground_truth"]),
        "ada_similarity": set(["answer", "ground_truth"]),
    }
    actual_input_cols = set()
    for col in expected_input_cols:
        if input_data[col] and input_data[col].strip():
            actual_input_cols.add(col)
    data_validation = selected_metrics
    for metric in selected_metrics:
        if selected_metrics[metric]:
            metric_required_fields = dict_metric_required_fields[metric]
            if metric_required_fields <= actual_input_cols:
                data_validation[metric] = True
            else:
                data_validation[metric] = False
    return data_validation
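The result maps each selected metric to whether its required inputs are present and non-empty. A minimal sketch with made-up inputs, assuming `promptflow` is installed: with an empty `context`, `gpt_groundedness` is switched off while the other selected metrics stay on.

```python
# Minimal sketch: an empty context disables context-dependent metrics.
from validate_input import validate_input

selected = {"gpt_fluency": True, "gpt_groundedness": True, "f1_score": True}
result = validate_input(
    question="Which tent is the most waterproof?",
    answer="The Alpine Explorer Tent.",
    context="",  # empty, so groundedness cannot be evaluated
    ground_truth="The Alpine Explorer Tent has the highest waterproof rating.",
    selected_metrics=selected,
)
print(result)
# Expected: {'gpt_fluency': True, 'gpt_groundedness': False, 'f1_score': True}
```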
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/aggregate_variants_results.py
from typing import List

from promptflow import tool, log_metric
import numpy as np


@tool
def aggregate_variants_results(results: List[dict], metrics: List[str]):
    aggregate_results = {}
    for result in results:
        for name, value in result.items():
            if name in metrics[0]:
                if name not in aggregate_results.keys():
                    aggregate_results[name] = []
                try:
                    float_val = float(value)
                except Exception:
                    float_val = np.nan
                aggregate_results[name].append(float_val)

    for name, value in aggregate_results.items():
        if name in metrics[0]:
            aggregate_results[name] = np.nanmean(value)
            aggregate_results[name] = round(aggregate_results[name], 2)
            log_metric(name, aggregate_results[name])
    return aggregate_results
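In the flow, `metrics` carries the per-line value of the `metrics` input, so `metrics[0]` is a comma-separated string such as `'f1_score,gpt_groundedness'` and `name in metrics[0]` acts as a substring check against it. A minimal sketch with made-up line results, assuming `promptflow` is installed (outside a flow run, `log_metric` has no run context to record to):

```python
# Minimal sketch: average two per-line results for the selected metrics.
from aggregate_variants_results import aggregate_variants_results

line_results = [
    {"f1_score": 1.0, "gpt_groundedness": "5"},
    {"f1_score": 0.5, "gpt_groundedness": "4"},
]
aggregated = aggregate_variants_results(line_results, ["f1_score,gpt_groundedness"])
# Expected metric values: f1_score -> 0.75, gpt_groundedness -> 4.5
print(aggregated)
```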
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_relevance_prompt.jinja2
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.

user:
Relevance measures how well the answer addresses the main aspects of the question, based on the context. Consider whether all and only the important aspects are contained in the answer when evaluating relevance. Given the context and question, score the relevance of the answer between one to five stars using the following rating scale:
One star: the answer completely lacks relevance
Two stars: the answer mostly lacks relevance
Three stars: the answer is partially relevant
Four stars: the answer is mostly relevant
Five stars: the answer has perfect relevance

This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.

context: Marie Curie was a Polish-born physicist and chemist who pioneered research on radioactivity and was the first woman to win a Nobel Prize.
question: What field did Marie Curie excel in?
answer: Marie Curie was a renowned painter who focused mainly on impressionist styles and techniques.
stars: 1

context: The Beatles were an English rock band formed in Liverpool in 1960, and they are widely regarded as the most influential music band in history.
question: Where were The Beatles formed?
answer: The band The Beatles began their journey in London, England, and they changed the history of music.
stars: 2

context: The recent Mars rover, Perseverance, was launched in 2020 with the main goal of searching for signs of ancient life on Mars. The rover also carries an experiment called MOXIE, which aims to generate oxygen from the Martian atmosphere.
question: What are the main goals of Perseverance Mars rover mission?
answer: The Perseverance Mars rover mission focuses on searching for signs of ancient life on Mars.
stars: 3

context: The Mediterranean diet is a commonly recommended dietary plan that emphasizes fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. Studies have shown that it offers numerous health benefits, including a reduced risk of heart disease and improved cognitive health.
question: What are the main components of the Mediterranean diet?
answer: The Mediterranean diet primarily consists of fruits, vegetables, whole grains, and legumes.
stars: 4

context: The Queen's Royal Castle is a well-known tourist attraction in the United Kingdom. It spans over 500 acres and contains extensive gardens and parks. The castle was built in the 15th century and has been home to generations of royalty.
question: What are the main attractions of the Queen's Royal Castle?
answer: The main attractions of the Queen's Royal Castle are its expansive 500-acre grounds, extensive gardens, parks, and the historical castle itself, which dates back to the 15th century and has housed generations of royalty.
stars: 5

context: {{context}}
question: {{question}}
answer: {{answer}}
stars:
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/README.md
# Q&A Evaluation:

This is a flow for evaluating Q&A systems by leveraging Large Language Models (LLMs) to measure the quality and safety of responses. Utilizing GPT and GPT embedding models to assist with measurements aims to achieve high agreement with human evaluations compared to traditional mathematical measurements.

## Evaluation Metrics

The Q&A evaluation flow allows you to assess and evaluate your model with LLM-assisted metrics and f1_score:

* __gpt_coherence__: Measures the quality of all sentences in a model's predicted answer and how they fit together naturally. Coherence is scored on a scale of 1 to 5, with 1 being the worst and 5 being the best.

* __gpt_relevance__: Measures how relevant the model's predicted answers are to the questions asked. Relevance is scored on a scale of 1 to 5, with 1 being the worst and 5 being the best.

* __gpt_fluency__: Measures how grammatically and linguistically correct the model's predicted answer is. Fluency is scored on a scale of 1 to 5, with 1 being the worst and 5 being the best.

* __gpt_similarity__: Measures the similarity between the user-provided ground truth answer and the model's predicted answer. Similarity is scored on a scale of 1 to 5, with 1 being the worst and 5 being the best.

* __gpt_groundedness__ (against context): Measures how grounded the model's predicted answers are in the context. Even if an LLM's responses are true, they are considered ungrounded if they cannot be verified against the context. Groundedness is scored on a scale of 1 to 5, with 1 being the worst and 5 being the best.

* __ada_similarity__: Measures the cosine similarity of ada embeddings of the model prediction and the ground truth. ada_similarity is a value in the range [0, 1]. A minimal sketch of this computation is shown at the end of this README.

* __F1-score__: Computes the F1 score based on the tokens in the predicted answer and the ground truth, i.e. the number of common tokens between the normalized ground truth and the normalized predicted answer. F1-score is a value in the range [0, 1].

## Tools used in this flow
- `Python` tool
- `LLM` tool
- `Embedding` tool

## 0. Setup connection

Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.

```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>
```

## 1. Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .

# test with flow inputs
pf flow test --flow . --inputs metrics="ada_similarity,gpt_fluency,f1_score" question="what programming language is good for learning to code? " ground_truth="Python is good for learning to code." answer="Python" context="Python is the most picked language for learning to code."
```

## 2. Create flow run with multi-line data and selected metrics

```bash
pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' answer='${data.answer}' context='${data.context}' ground_truth='${data.ground_truth}' metrics='f1_score,gpt_groundedness' --stream
```

You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. See [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI.

## 3. Run and Evaluate your flow with this Q&A evaluation flow
After you develop your flow, you may want to run and evaluate it with this evaluation flow. Here we use the flow [basic_chat](../../chat/basic-chat/) as the flow to evaluate. It is a flow demonstrating how to create a chatbot with an LLM. The chatbot can remember previous interactions and use the conversation history to generate the next message, given a question.

### 3.1 Create a batch run of your flow

```bash
pf run create --flow ../../chat/basic-chat --data data.jsonl --column-mapping question='${data.question}' --name basic_chat_run --stream
```

Please note that `column-mapping` is a mapping from flow input names to specified values. Please refer to [Use column mapping](https://aka.ms/pf/column-mapping) for more details.

The flow run is named by specifying `--name basic_chat_run` in the above command. You can view the run details with its run name using the command:

```bash
pf run show-details -n basic_chat_run
```

### 3.2 Evaluate your flow

You can use this evaluation flow to measure the quality and safety of your flow responses. After the chat flow run is finished, you can apply this evaluation flow to the run:

```bash
pf run create --flow . --data data.jsonl --column-mapping ground_truth='${data.ground_truth}' answer='${run.outputs.answer}' context='${data.context}' question='${data.question}' metrics='gpt_groundedness,f1_score' --run basic_chat_run --stream --name evaluation_qa
```

Please note that the flow run to be evaluated is specified with `--run basic_chat_run`. As with the previous run, the evaluation run is named with `--name evaluation_qa`. You can view the evaluation run details with:

```bash
pf run show-details -n evaluation_qa
pf run show-metrics -n evaluation_qa
```
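The `ada_similarity` metric above is the cosine similarity between the ada-002 embedding of the ground truth and the embedding of the answer. A minimal sketch of that computation (illustrative only, not necessarily the exact code in `ada_cosine_similarity_score.py`; the vectors here are made up, while in the flow they come from the two embedding nodes):

```python
import numpy as np


def cosine_similarity(a, b):
    # Cosine of the angle between two embedding vectors, in [-1, 1]
    # (in practice close to [0, 1] for ada embeddings of natural text).
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))


# Illustrative 3-d vectors; real ada-002 embeddings have 1536 dimensions.
print(round(cosine_similarity([0.1, 0.2, 0.3], [0.1, 0.25, 0.28]), 3))  # ~0.99
```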
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: question: type: string default: Which tent is the most waterproof? is_chat_input: false answer: type: string default: The Alpine Explorer Tent is the most waterproof. is_chat_input: false context: type: string default: From the our product list, the alpine explorer tent is the most waterproof. The Adventure Dining Tabbe has higher weight. is_chat_input: false ground_truth: type: string default: The Alpine Explorer Tent has the highest rainfly waterproof rating at 3000m is_chat_input: false metrics: type: string default: gpt_groundedness,f1_score,ada_similarity,gpt_fluency,gpt_coherence,gpt_similarity,gpt_relevance is_chat_input: false outputs: f1_score: type: string reference: ${concat_scores.output.f1_score} gpt_coherence: type: string reference: ${concat_scores.output.gpt_coherence} gpt_similarity: type: string reference: ${concat_scores.output.gpt_similarity} gpt_fluency: type: string reference: ${concat_scores.output.gpt_fluency} gpt_relevance: type: string reference: ${concat_scores.output.gpt_relevance} gpt_groundedness: type: string reference: ${concat_scores.output.gpt_groundedness} ada_similarity: type: string reference: ${concat_scores.output.ada_similarity} nodes: - name: gpt_coherence type: llm source: type: code path: gpt_coherence_prompt.jinja2 inputs: deployment_name: gpt-4 temperature: 0 top_p: 1 stop: "" max_tokens: 1 presence_penalty: 0 frequency_penalty: 0 logit_bias: "" question: ${inputs.question} answer: ${inputs.answer} provider: AzureOpenAI connection: open_ai_connection api: chat module: promptflow.tools.aoai activate: when: ${validate_input.output.gpt_coherence} is: true use_variants: false - name: concat_scores type: python source: type: code path: concat_scores.py inputs: ada_cosine_similarity: ${ada_similarity.output} f1_score: ${f1_score.output} gpt_coherence_score: ${gpt_coherence.output} gpt_fluency_score: ${gpt_fluency.output} gpt_groundedness_score: ${gpt_groundedness.output} gpt_relevance_score: ${gpt_relevance.output} gpt_similarity_score: ${gpt_similarity.output} use_variants: false - name: gpt_similarity type: llm source: type: code path: gpt_similarity_prompt.jinja2 inputs: deployment_name: gpt-4 temperature: 0 top_p: 1 stop: "" max_tokens: 1 presence_penalty: 0 frequency_penalty: 0 logit_bias: "" answer: ${inputs.answer} ground_truth: ${inputs.ground_truth} question: ${inputs.question} provider: AzureOpenAI connection: open_ai_connection api: chat module: promptflow.tools.aoai activate: when: ${validate_input.output.gpt_similarity} is: true use_variants: false - name: gpt_relevance type: llm source: type: code path: gpt_relevance_prompt.jinja2 inputs: deployment_name: gpt-4 temperature: 0 top_p: 1 stop: "" max_tokens: 1 presence_penalty: 0 frequency_penalty: 0 logit_bias: "" answer: ${inputs.answer} context: ${inputs.context} question: ${inputs.question} provider: AzureOpenAI connection: open_ai_connection api: chat module: promptflow.tools.aoai activate: when: ${validate_input.output.gpt_relevance} is: true use_variants: false - name: gpt_fluency type: llm source: type: code path: gpt_fluency_prompt.jinja2 inputs: deployment_name: gpt-4 temperature: 0 top_p: 1 stop: "" max_tokens: 1 presence_penalty: 0 frequency_penalty: 0 logit_bias: "" answer: ${inputs.answer} question: ${inputs.question} provider: AzureOpenAI connection: open_ai_connection api: chat module: promptflow.tools.aoai activate: when: ${validate_input.output.gpt_fluency} is: true use_variants: false 
- name: f1_score type: python source: type: code path: f1_score.py inputs: answer: ${inputs.answer} ground_truth: ${inputs.ground_truth} activate: when: ${validate_input.output.f1_score} is: true use_variants: false - name: gpt_groundedness type: llm source: type: code path: gpt_groundedness_prompt.jinja2 inputs: deployment_name: gpt-4 temperature: 0 top_p: 1 stop: "" max_tokens: 1 presence_penalty: 0 frequency_penalty: 0 logit_bias: "" answer: ${inputs.answer} context: ${inputs.context} provider: AzureOpenAI connection: open_ai_connection api: chat module: promptflow.tools.aoai activate: when: ${validate_input.output.gpt_groundedness} is: true use_variants: false - name: aggregate_variants_results type: python source: type: code path: aggregate_variants_results.py inputs: metrics: ${inputs.metrics} results: ${concat_scores.output} aggregation: true use_variants: false - name: select_metrics type: python source: type: code path: select_metrics.py inputs: metrics: ${inputs.metrics} use_variants: false - name: embeded_ground_truth type: python source: type: package tool: promptflow.tools.embedding.embedding inputs: connection: open_ai_connection deployment_name: text-embedding-ada-002 input: ${inputs.ground_truth} activate: when: ${validate_input.output.ada_similarity} is: true use_variants: false - name: embeded_answer type: python source: type: package tool: promptflow.tools.embedding.embedding inputs: connection: open_ai_connection deployment_name: text-embedding-ada-002 input: ${inputs.answer} activate: when: ${validate_input.output.ada_similarity} is: true use_variants: false - name: ada_similarity type: python source: type: code path: ada_cosine_similarity_score.py inputs: a: ${embeded_ground_truth.output} b: ${embeded_answer.output} activate: when: ${validate_input.output.ada_similarity} is: true use_variants: false - name: validate_input type: python source: type: code path: validate_input.py inputs: answer: ${inputs.answer} context: ${inputs.context} ground_truth: ${inputs.ground_truth} question: ${inputs.question} selected_metrics: ${select_metrics.output} use_variants: false node_variants: {} environment: python_requirements_txt: requirements.txt
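The LLM and scoring nodes in this flow are gated by `activate` conditions on `${validate_input.output.<metric>}`. The `validate_input.py` they reference is not included in this excerpt; below is a plausible minimal sketch, mirroring the validation tool shown later for the RAG evaluation flow. The required-field mapping is an assumption, not the flow's actual code; the signature follows the node inputs declared above.

```python
from promptflow import tool


def is_valid(input_item):
    return bool(input_item and input_item.strip())


@tool
def validate_input(question: str, answer: str, context: str, ground_truth: str, selected_metrics: dict) -> dict:
    # Assumed mapping from metric name to the flow inputs it needs; the real
    # file may differ, but the activate conditions above only require that the
    # returned dict has one boolean per selected metric.
    required_fields = {
        "gpt_coherence": {"question", "answer"},
        "gpt_fluency": {"question", "answer"},
        "gpt_relevance": {"question", "answer", "context"},
        "gpt_groundedness": {"answer", "context"},
        "gpt_similarity": {"question", "answer", "ground_truth"},
        "f1_score": {"answer", "ground_truth"},
        "ada_similarity": {"answer", "ground_truth"},
    }
    provided = {name for name, value in {"question": question, "answer": answer, "context": context,
                                         "ground_truth": ground_truth}.items() if is_valid(value)}
    # A metric runs only when it was selected and all of its required inputs are non-empty.
    return {metric: bool(selected) and required_fields.get(metric, set()) <= provided
            for metric, selected in selected_metrics.items()}
```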
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/concat_scores.py
from promptflow import tool import numpy as np import re @tool def concat_results(gpt_coherence_score: str = None, gpt_similarity_score: str = None, gpt_fluency_score: str = None, gpt_relevance_score: str = None, gpt_groundedness_score: str = None, f1_score: float = None, ada_cosine_similarity: float = None): load_list = [{'name': 'gpt_coherence', 'score': gpt_coherence_score}, {'name': 'gpt_similarity', 'score': gpt_similarity_score}, {'name': 'gpt_fluency', 'score': gpt_fluency_score}, {'name': 'gpt_relevance', 'score': gpt_relevance_score}, {'name': 'gpt_groundedness', 'score': gpt_groundedness_score}, {'name': 'f1_score', 'score': f1_score}, {'name': 'ada_similarity', 'score': ada_cosine_similarity}] scalar_metrics = ["f1_score", "ada_similarity"] score_list = [] errors = [] for item in load_list: if item["name"] in scalar_metrics: try: score = float(item["score"]) except Exception as e: score = np.nan errors.append({"name": item["name"], "msg": str(e), "data": item["score"]}) else: if item['score']: try: score = item["score"] match = re.search(r'\d', score) if match: score = float(match.group()) else: score = np.nan except Exception as e: score = np.nan errors.append({"name": item["name"], "msg": str(e), "data": item["score"]}) else: score = np.nan score_list.append({"name": item["name"], "score": score}) variant_level_result = {} for item in score_list: item_name = str(item["name"]) variant_level_result[item_name] = item["score"] if 'gpt' in item_name: variant_level_result[item_name + '_pass_rate'] = 1 if item["score"] > 3 else 0 return variant_level_result
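A usage sketch for the score-concatenation tool above (illustrative values; the import path is an assumption). GPT-style scores arrive as strings, the first digit is extracted as the star rating, and every `gpt_*` metric also gets a `*_pass_rate` flag that is 1 when the score is above 3:

```python
from concat_scores import concat_results  # assumed module name

line_scores = concat_results(
    gpt_coherence_score="5",
    gpt_similarity_score="4 stars",   # only the first digit is used
    gpt_fluency_score=None,           # metric not selected for this line -> NaN
    gpt_relevance_score="3",
    gpt_groundedness_score="5",
    f1_score=0.25,
    ada_cosine_similarity=0.92,
)
# e.g. gpt_coherence=5.0 with gpt_coherence_pass_rate=1,
#      gpt_relevance=3.0 with gpt_relevance_pass_rate=0 (3 is not above 3),
#      gpt_fluency=nan, f1_score=0.25, ada_similarity=0.92
print(line_scores)
```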
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/select_metrics.py
from promptflow import tool


@tool
def select_metrics(metrics: str) -> dict:
    supported_metrics = ('gpt_coherence', 'gpt_similarity', 'gpt_fluency', 'gpt_relevance', 'gpt_groundedness',
                         'f1_score', 'ada_similarity')
    user_selected_metrics = [metric.strip() for metric in metrics.split(',') if metric]
    metric_selection_dict = {}
    for metric in supported_metrics:
        if metric in user_selected_metrics:
            metric_selection_dict[metric] = True
        else:
            metric_selection_dict[metric] = False
    return metric_selection_dict
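A quick usage sketch for the tool above (illustrative; the import path is an assumption). Only the metrics named in the comma-separated flow input are switched on:

```python
from select_metrics import select_metrics  # assumed module name

print(select_metrics("gpt_groundedness, f1_score"))
# {'gpt_coherence': False, 'gpt_similarity': False, 'gpt_fluency': False,
#  'gpt_relevance': False, 'gpt_groundedness': True, 'f1_score': True,
#  'ada_similarity': False}
```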
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/f1_score.py
from promptflow import tool
from collections import Counter


@tool
def compute_f1_score(ground_truth: str, answer: str) -> float:
    import string
    import re

    class QASplitTokenizer:
        def __call__(self, line):
            """Tokenizes an input line using split() on whitespace

            :param line: a segment to tokenize
            :return: the tokenized line
            """
            return line.split()

    def normalize_text(text) -> str:
        """Lower text and remove punctuation, articles and extra whitespace."""

        def remove_articles(text):
            return re.sub(r"\b(a|an|the)\b", " ", text)

        def white_space_fix(text):
            return " ".join(text.split())

        def remove_punctuation(text):
            exclude = set(string.punctuation)
            return "".join(ch for ch in text if ch not in exclude)

        def lower(text):
            return text.lower()

        return white_space_fix(remove_articles(remove_punctuation(lower(text))))

    prediction_tokens = normalize_text(answer)
    reference_tokens = normalize_text(ground_truth)
    tokenizer = QASplitTokenizer()
    prediction_tokens = tokenizer(prediction_tokens)
    reference_tokens = tokenizer(reference_tokens)

    common_tokens = Counter(prediction_tokens) & Counter(reference_tokens)
    num_common_tokens = sum(common_tokens.values())

    if num_common_tokens == 0:
        f1 = 0.0
    else:
        precision = 1.0 * num_common_tokens / len(prediction_tokens)
        recall = 1.0 * num_common_tokens / len(reference_tokens)

        f1 = (2.0 * precision * recall) / (precision + recall)

    return f1
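A worked example for the F1 tool above, using the sample inputs from this flow's README (the import path is an assumption):

```python
from f1_score import compute_f1_score  # assumed module name

score = compute_f1_score(
    ground_truth="Python is good for learning to code.",
    answer="Python",
)
# After normalization the ground truth has 7 tokens and the answer has 1,
# with 1 token ("python") in common: precision = 1/1, recall = 1/7,
# so F1 = 2 * (1 * 1/7) / (1 + 1/7) = 0.25.
print(round(score, 2))  # 0.25
```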
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_groundedness_prompt.jinja2
system: You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. user: You will be presented with a CONTEXT and an ANSWER about that CONTEXT. You need to decide whether the ANSWER is entailed by the CONTEXT by choosing one of the following rating: 1. 5: The ANSWER follows logically from the information contained in the CONTEXT. 2. 1: The ANSWER is logically false from the information contained in the CONTEXT. 3. an integer score between 1 and 5 and if such integer score does not exist, use 1: It is not possible to determine whether the ANSWER is true or false without further information. Read the passage of information thoroughly and select the correct answer from the three answer labels. Read the CONTEXT thoroughly to ensure you know what the CONTEXT entails. Note the ANSWER is generated by a computer system, it can contain certain symbols, which should not be a negative factor in the evaluation. Independent Examples: ## Example Task #1 Input: {"CONTEXT": "Some are reported as not having been wanted at all.", "QUESTION": "", "ANSWER": "All are reported as being completely and fully wanted."} ## Example Task #1 Output: 1 ## Example Task #2 Input: {"CONTEXT": "Ten new television shows appeared during the month of September. Five of the shows were sitcoms, three were hourlong dramas, and two were news-magazine shows. By January, only seven of these new shows were still on the air. Five of the shows that remained were sitcoms.", "QUESTION": "", "ANSWER": "At least one of the shows that were cancelled was an hourlong drama."} ## Example Task #2 Output: 5 ## Example Task #3 Input: {"CONTEXT": "In Quebec, an allophone is a resident, usually an immigrant, whose mother tongue or home language is neither French nor English.", "QUESTION": "", "ANSWER": "In Quebec, an allophone is a resident, usually an immigrant, whose mother tongue or home language is not French."} ## Example Task #3 Output: 5 ## Example Task #4 Input: {"CONTEXT": "Some are reported as not having been wanted at all.", "QUESTION": "", "ANSWER": "All are reported as being completely and fully wanted."} ## Example Task #4 Output: 1 ## Actual Task Input: {"CONTEXT": {{context}}, "QUESTION": "", "ANSWER": {{answer}}} Reminder: The return values for each task should be correctly formatted as an integer between 1 and 5. Do not repeat the context and question. Actual Task Output:
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_coherence_prompt.jinja2
system: You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. user: Coherence of an answer is measured by how well all the sentences fit together and sound naturally as a whole. Consider the overall quality of the answer when evaluating coherence. Given the question and answer, score the coherence of answer between one to five stars using the following rating scale: One star: the answer completely lacks coherence Two stars: the answer mostly lacks coherence Three stars: the answer is partially coherent Four stars: the answer is mostly coherent Five stars: the answer has perfect coherency This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5. question: What is your favorite indoor activity and why do you enjoy it? answer: I like pizza. The sun is shining. stars: 1 question: Can you describe your favorite movie without giving away any spoilers? answer: It is a science fiction movie. There are dinosaurs. The actors eat cake. People must stop the villain. stars: 2 question: What are some benefits of regular exercise? answer: Regular exercise improves your mood. A good workout also helps you sleep better. Trees are green. stars: 3 question: How do you cope with stress in your daily life? answer: I usually go for a walk to clear my head. Listening to music helps me relax as well. Stress is a part of life, but we can manage it through some activities. stars: 4 question: What can you tell me about climate change and its effects on the environment? answer: Climate change has far-reaching effects on the environment. Rising temperatures result in the melting of polar ice caps, contributing to sea-level rise. Additionally, more frequent and severe weather events, such as hurricanes and heatwaves, can cause disruption to ecosystems and human societies alike. stars: 5 question: {{question}} answer: {{answer}} stars:
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/data.jsonl
{"groundtruth": "Tomorrow's weather will be sunny.","prediction": "The weather will be sunny tomorrow."}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/line_process.py
from promptflow import tool @tool def line_process(groundtruth: str, prediction: str): """ This tool processes the prediction of a single line and returns the processed result. :param groundtruth: the groundtruth of a single line. :param prediction: the prediction of a single line. """ # Add your line processing logic here return "Correct" if groundtruth.lower() == prediction.lower() else "Incorrect"
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/requirements.txt
promptflow promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/aggregate.py
from typing import List from promptflow import tool @tool def aggregate(processed_results: List[str]): """ This tool aggregates the processed result of all lines to the variant level and log metric for each variant. :param processed_results: List of the output of line_process node. """ # Add your aggregation logic here # aggregated_results should be a dictionary with the metric name as the key and the metric value as the value. results_num = len(processed_results) print(results_num) print(processed_results) # Log metric for each variant from promptflow import log_metric log_metric(key="results_num", value=results_num) return results_num
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/README.md
# Basic Eval
This example shows how to create a basic evaluation flow.

Tools used in this flow:
- `python` tool

## Prerequisites

Install the promptflow SDK and other dependencies in this folder:
```bash
pip install -r requirements.txt
```

## What you will learn

In this flow, you will learn
- how to compose a point-based evaluation flow, where you can calculate point-wise metrics.
- the way to log metrics: use `from promptflow import log_metric` - see the file [aggregate.py](aggregate.py).

### 1. Test flow with single line data

Testing flow/node:
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .

# test with flow inputs
pf flow test --flow . --inputs groundtruth=ABC prediction=ABC

# test node with inputs
pf flow test --flow . --node line_process --inputs groundtruth=ABC prediction=ABC
```

### 2. Create flow run with multi-line data

Create a run that evaluates every line of the input data:

```bash
pf run create --flow . --data ./data.jsonl --column-mapping groundtruth='${data.groundtruth}' prediction='${data.prediction}' --stream
```

You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. See [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI.
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-basic/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: groundtruth: type: string default: groundtruth prediction: type: string default: prediction outputs: results: type: string reference: ${line_process.output} nodes: - name: line_process type: python source: type: code path: line_process.py inputs: groundtruth: ${inputs.groundtruth} prediction: ${inputs.prediction} - name: aggregate type: python source: type: code path: aggregate.py inputs: processed_results: ${line_process.output} aggregation: true environment: python_requirements_txt: requirements.txt
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/parse_retrival_score.py
from promptflow import tool

import re


@tool
def parse_retrieval_output(retrieval_output: str) -> dict:
    score_response = [sent.strip() for sent in
                      retrieval_output.strip("\"").split("# Result")[-1].strip().split('.') if sent.strip()]
    parsed_score_response = re.findall(r"\d+", score_response[-1])
    if len(parsed_score_response) > 0:
        score = parsed_score_response[-1].strip()
        if float(score) < 1.0 or float(score) > 5.0:
            score = float('nan')
    else:
        score = float('nan')
    try:
        reasoning_response, _ = retrieval_output.split("# Result")
    except Exception:
        reasoning_response = retrieval_output
    return {"quality_score": float(score), "quality_reasoning": reasoning_response}
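A usage sketch for the parser above (illustrative; the module name matches the file's spelling). The score is read from the digits after the final `# Result` marker, and everything before that marker is kept as the reasoning:

```python
from parse_retrival_score import parse_retrieval_output  # module name matches the file above

sample_output = (
    "- doc1 covers the rainfly waterproof rating asked about.\n"
    "# Overall Reason\n"
    "The documents fully answer the question.\n"
    "# Result\n"
    "5."
)
result = parse_retrieval_output(sample_output)
# quality_score -> 5.0; quality_reasoning -> the text before "# Result".
# Scores outside the 1-5 range (or missing digits) are mapped to NaN.
print(result["quality_score"])  # 5.0
```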
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/data.jsonl
{"question": "What is the purpose of the LLM Grounding Score, and what does a higher score mean in this context?", "answer": "The LLM Grounding Score is a metric used in the context of in-context learning with large-scale pretrained language models (LLMs) [doc1]. It measures the ability of the LLM to understand and connect with the provided context or demonstrations during the learning process.\n\nA higher LLM Grounding Score indicates that the LLM has a better understanding and connection with the context or demonstrations [doc1]. It means that the LLM is able to effectively utilize the provided examples and generate more accurate and relevant responses based on that context.\n\nPlease note that the LLM Grounding Score is specific to the in-context learning framework and may not be applicable in other contexts or tasks.\n\n[doc1]: In-Context Learning with Large-Scale Pretrained Language Models: How Far Are We? (2022) - zelin, English.", "documents": [{"[doc1]":{"title": "In-Context Learning with Large-Scale Pretrained Language Models", "content": "In-Context Learning with Large-Scale Pretrained Language Models\nConcepts\nFew-Shot Learning : the model learns a new task from a small amount of training examples.\nIn-Context Learning : large-scale pretrained language models learn a new task simply by conditioning on a few training examples and predicting which tokens best complete a test input. In-context learning is entirely different from few-shot learning: the language models does receive only a few training examples, but the overall system may still require a large number of training examples.\nFew-shot learning with Large Language Models (LLMs)\nGPT-3 ( paper link (https://arxiv.org/pdf/2005.14165.pdf) paper link ) introduced the idea of adapting models to new task without fine-tuning or gradient update. The approach is to elicit the LLM with text interaction to work with new tasks with accuracy close to many finetuned models. This ability of LLMs to work with few demonstrations or even just a task description is demonstrated when scale of the model crosses a threshold. Image below shows the difference between different ways to do in-context learning.\nUsually, giving few demonstrations (Few-shot) shows better performance than giving just an instruction (Zero-shot). Varying the number of in-context examples also affects performance.\nHow to actually prompt the language models\nDifferent prompt orders have different performance\nRelevant Paper: Fantastically Ordered Prompts and Where to Find Them: Overcoming Few-Shot Prompt Order Sensitivity (https://arxiv.org/abs/2104.08786) Fantastically Ordered Prompts and Where to Find Them: Overcoming Few-Shot Prompt Order Sensitivity\nThis paper gives few major insights:\n1. Even with a fixed set of few-shot prompts, different orders lead to different performance. This means the models are sensitive to permutations of demonstrations we provide them. \n2. There are certain ordering that are \"fantastic\" and we might need to discover them through \"probing\". You can find the details in the paper above.\nFinding similar train examples for prompt improves performance\nRelevant Paper: Making pre-trained language models better few-shot learners (https://arxiv.org/pdf/2012.15723.pdf) Making pre-trained language models better few-shot learners\nIntuitively, we can guess that if we give model demonstrations that look like the final inference task, the model might show better performance. 
This paper demonstrates improvement in performance by using pre-trained sentence embeddings to select the closest prompt examples to the given input instance (for text classification tasks).\nMain Takeaway : Based on above two papers, we can conclude that, for improving in-context performance for any task:\n1. We need to find \"great\" few-shot examples. This can be done using models pretrained on other tasks. We call such models \"Retrievers\".\n2. Order in which we select the samples matter.\nIn-Context Learning with Retrievers\nRelevant Paper: What Makes Good In-Context Examples for GPT-3? (https://arxiv.org/pdf/2101.06804.pdf) What Makes Good In-Context Examples for GPT-3? 1. This paper demonstrates how using nearest neighbor samples based on semantic similarity to test samples improves performance by benchmarking on various NLU and NLG tasks.\n2. They also show that using models finetuned on semantic similarity tasks (like Roberta finetuned on NLI) shows even better performance. This implies that certain models are better retrievers than others for in-context learning.\nAdvanced: Improve retrievers by finetuning on downstream tasks\nDTE-finetuned (01_dte.md) DTE-finetuned\nTarget Similarity Tuning (02_tst.md) Target Similarity Tuning\nSynchromesh: Reliable Code Generation from Pre-Trained Language Models (https://arxiv.org/pdf/2201.11227.pdf) Synchromesh: Reliable Code Generation from Pre-Trained Language Models\nSuppose that we already have the assumptions that the training examples with the same SQL template as the test input are \"fantastic\" examples, we can extract SQL templates for the training examples and fine-tune the retriever to make the training examples sharing the same SQL template together.\nGenerally, we can finetune the retrievers to learn to embed examples with similar output closer. This seem to work well for code-generation task but we need to benchmark this across tasks.\nLimitation: the assumption that the training examples with the same SQL template as the test input are \"fantastic\" examples . We need different implementations (or even different assumptions) for different downstream tasks."}}]}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/rag_retrieval_prompt.jinja2
system:
You are a helpful assistant.

user:
A chat history between user and bot is shown below
A list of documents is shown below in json format, and each document has one unique id.
These listed documents are used as context to answer the given question.
The task is to score the relevance between the documents and the potential answer to the given question in the range of 1 to 5.
1 means none of the documents is relevant to the question at all.
5 means either one of the documents or a combination of a few documents is ideal for answering the given question.
Think through step by step:
- Summarize each given document first
- Determine the underlying intent of the given question, when the question is ambiguous, refer to the given chat history
- Measure how suitable each document is to the given question, list the document id and the corresponding relevance score.
- Summarize the overall relevance of the given list of documents to the given question after # Overall Reason, note that the answer to the question can come solely from a single document or from a combination of multiple documents.
- Finally, output "# Result" followed by a score from 1 to 5.

# Question
{{question}}

# Chat History

# Documents
---BEGIN RETRIEVED DOCUMENTS---
{{FullBody}}
---END RETRIEVED DOCUMENTS---
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/requirements.txt
promptflow promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/validate_input.py
from promptflow import tool def is_valid(input_item): return True if input_item and input_item.strip() else False @tool def validate_input(question: str, answer: str, documents: str, selected_metrics: dict) -> dict: input_data = {"question": is_valid(question), "answer": is_valid(answer), "documents": is_valid(documents)} expected_input_cols = set(input_data.keys()) dict_metric_required_fields = {"gpt_groundedness": set(["question", "answer", "documents"]), "gpt_relevance": set(["question", "answer", "documents"]), "gpt_retrieval_score": set(["question", "documents"])} actual_input_cols = set() for col in expected_input_cols: if input_data[col]: actual_input_cols.add(col) data_validation = selected_metrics for metric in selected_metrics: if selected_metrics[metric]: metric_required_fields = dict_metric_required_fields[metric] if metric_required_fields <= actual_input_cols: data_validation[metric] = True else: data_validation[metric] = False return data_validation
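A usage sketch for the validation tool above (illustrative values; the import path is an assumption). Metrics whose required inputs are missing are switched off, which is what the `activate` conditions in this flow's `flow.dag.yaml` consume:

```python
from validate_input import validate_input  # assumed module name

selected = {"gpt_groundedness": True, "gpt_relevance": True, "gpt_retrieval_score": True}
validated = validate_input(
    question="Which tent is the most waterproof?",
    answer="",                      # missing answer for this line
    documents='{"documents": []}',
    selected_metrics=selected,
)
# gpt_groundedness and gpt_relevance need an answer, so they are disabled;
# gpt_retrieval_score only needs question + documents, so it stays enabled.
print(validated)  # {'gpt_groundedness': False, 'gpt_relevance': False, 'gpt_retrieval_score': True}
```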
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/aggregate_variants_results.py
from typing import List from promptflow import tool, log_metric import numpy as np @tool def aggregate_variants_results(results: List[dict], metrics: List[str]): aggregate_results = {} for result in results: for name, value in result.items(): if name not in aggregate_results.keys(): aggregate_results[name] = [] try: float_val = float(value) except Exception: float_val = np.nan aggregate_results[name].append(float_val) for name, value in aggregate_results.items(): if name in metrics[0]: metric_name = name aggregate_results[name] = np.nanmean(value) if 'pass_rate' in metric_name: metric_name = metric_name + "(%)" aggregate_results[name] = aggregate_results[name] * 100.0 aggregate_results[name] = round(aggregate_results[name], 2) log_metric(metric_name, aggregate_results[name]) return aggregate_results
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/rag_groundedness_prompt.jinja2
system:
You are a helpful assistant.

user:
Your task is to check and rate whether the factual information in the chatbot's reply is fully grounded in the retrieved documents.
You will be given a question, the chatbot's response to the question, a chat history between this chatbot and a human, and a list of retrieved documents in json format.
The chatbot must base its response exclusively on factual information extracted from the retrieved documents, utilizing paraphrasing, summarization, or inference techniques. When the chatbot responds with information that is not mentioned in, or cannot be inferred from, the retrieved documents, we refer to it as a grounding issue.

To rate the groundedness of the chat response, follow the steps below:
1. Review the chat history to better understand the question and chat response
2. Look for all the factual information in the chatbot's response
3. Compare the factual information in the chatbot's response with the retrieved documents. Check if there are any facts that are not in the retrieved documents at all, or that contradict or distort the facts in the retrieved documents. If there are, write them down. If there are none, leave it blank. Note that some facts may be implied or suggested by the retrieved documents, but not explicitly stated. In that case, use your best judgment to decide if the fact is grounded or not.
    For example, if the retrieved documents mention that a film was nominated for 12 awards, and the chatbot's reply states the same, you can consider that fact as grounded, as it is directly taken from the retrieved documents.
    However, if the retrieved documents do not mention the film won any awards at all, and the chatbot reply states that the film won some awards, you should consider that fact as not grounded.
4. Rate how well grounded the chatbot response is on a Likert scale from 1 to 5, judging whether the chatbot response has no ungrounded facts (higher is better):
    5: agree strongly
    4: agree
    3: neither agree nor disagree
    2: disagree
    1: disagree strongly
    If the chatbot response used information from outside sources, or made claims that are not backed up by the retrieved documents, give it a low score.
5. Your answer should follow the format:
    <Quality reasoning:> [insert reasoning here]
    <Quality score: [insert score here]/5>
Your answer must end with <Input for Labeling End>.

# Question
{{ question }}

# Chat Response
{{ answer }}

# Chat History

# Documents
---BEGIN RETRIEVED DOCUMENTS---
{{ FullBody }}
---END RETRIEVED DOCUMENTS---
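The prompt above asks the model to end with `<Quality score: [score]/5>`. The flow's own parser for that format is not shown in this excerpt; a minimal sketch of how a downstream node might extract the score (the function name and regex are assumptions):

```python
import re


def parse_groundedness_score(llm_output: str) -> float:
    # Pull the numeric value out of "<Quality score: X/5>"; NaN when absent.
    match = re.search(r"Quality score:\s*(\d+(?:\.\d+)?)\s*/\s*5", llm_output)
    return float(match.group(1)) if match else float("nan")


sample = "<Quality reasoning:> All facts are supported by doc1.\n<Quality score: 4/5>\n<Input for Labeling End>"
print(parse_groundedness_score(sample))  # 4.0
```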
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/README.md
# Q&A Evaluation:

This is a flow for evaluating Q&A RAG (Retrieval Augmented Generation) systems by leveraging state-of-the-art Large Language Models (LLMs) to measure the quality and safety of responses. Utilizing a GPT model to assist with measurements aims to achieve high agreement with human evaluations compared to traditional mathematical measurements.

## What you will learn

The Q&A RAG evaluation flow allows you to assess and evaluate your model with the LLM-assisted metrics:

__gpt_retrieval_score__: Measures the relevance between the retrieved documents and the potential answer to the given question in the range of 1 to 5:
* 1 means that none of the documents is relevant to the question at all.
* 5 means that either one of the documents or a combination of a few documents is ideal for answering the given question.

__gpt_groundedness__: Measures how grounded the factual information in the answer is in the facts from the retrieved documents. Even if an answer is true, it is considered ungrounded if it is not verifiable against the retrieved documents. The grounding score is scored on a scale of 1 to 5, with 1 being the worst and 5 being the best.

__gpt_relevance__: Measures the answer quality against a reference answer generated by the LLM from the retrieved documents, in the range of 1 to 5:
* 1 means the provided answer is completely irrelevant to the reference answer.
* 5 means the provided answer includes all information necessary to answer the question based on the reference answer. If the reference answer cannot be generated because no relevant documents were retrieved, the answer would be rated as 5.

## Prerequisites

- Connection: Azure OpenAI or OpenAI connection.
- Data input: Evaluating these metrics requires you to provide data inputs including a question, an answer, and documents in json format.

## Tools used in this flow
- `Python` tool
- `LLM` tool

## 0. Setup connection

Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.

```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>
```

## 1. Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
```

## 2. Create flow run with multi-line data and selected metrics

```bash
pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' answer='${data.answer}' documents='${data.documents}' metrics='gpt_groundedness' --stream
```

You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. See [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI.

## 3. Run and Evaluate your flow with this Q&A RAG evaluation flow

After you develop your flow, you may want to run and evaluate it with this evaluation flow. Here we use the flow [basic_chat](../../chat/basic-chat/) as the main flow to evaluate. It is a flow demonstrating how to create a chatbot with an LLM. The chatbot can remember previous interactions and use the conversation history to generate the next message, given a question.
### 3.1 Create a batch run of your flow

```bash
pf run create --flow ../../chat/basic-chat --data data.jsonl --column-mapping question='${data.question}' --name basic_chat_run --stream
```

Please note that `column-mapping` is a mapping from flow input names to specified values. Please refer to [Use column mapping](https://aka.ms/pf/column-mapping) for more details.

The flow run is named by specifying `--name basic_chat_run` in the above command. You can view the run details with its run name using the command:

```bash
pf run show-details -n basic_chat_run
```

### 3.2 Evaluate your flow

You can use this evaluation flow to measure the quality and safety of your flow responses. After the chat flow run is finished, you can apply this evaluation flow to the run:

```bash
pf run create --flow . --data data.jsonl --column-mapping answer='${run.outputs.answer}' documents='${data.documents}' question='${data.question}' metrics='gpt_groundedness,gpt_relevance,gpt_retrieval_score' --run basic_chat_run --stream --name evaluation_qa_rag
```

Please note that the flow run to be evaluated is specified with `--run basic_chat_run`. As with the previous run, the evaluation run is named with `--name evaluation_qa_rag`. You can view the evaluation run details with:

```bash
pf run show-details -n evaluation_qa_rag
pf run show-metrics -n evaluation_qa_rag
```
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: metrics: type: string default: gpt_groundedness,gpt_relevance,gpt_retrieval_score is_chat_input: false answer: type: string default: Of the tents mentioned in the retrieved documents, the Alpine Explorer Tent has the highest waterproof rating of 3000mm for its rainfly. is_chat_input: false question: type: string default: Which tent is the most waterproof? is_chat_input: false documents: type: string default: "{\"documents\": [{\"content\":\"<h1 id=\\\"information-about-product-item_number-1\\\">Information about product item_number: 1</h1>\\n<p>TrailMaster X4 Tent, price $250,</p>\\n<h2 id=\\\"brand\\\">Brand</h2>\\n<p>OutdoorLiving</p>\\n<h2 id=\\\"category\\\">Category</h2>\\n<p>Tents</p>\\n<h2 id=\\\"features\\\">Features</h2>\\n<ul>\\n<li>Polyester material for durability</li>\\n<li>Spacious interior to accommodate multiple people</li>\\n<li>Easy setup with included instructions</li>\\n<li>Water-resistant construction to withstand light rain</li>\\n<li>Mesh panels for ventilation and insect protection</li>\\n<li>Rainfly included for added weather protection</li>\\n<li>Multiple doors for convenient entry and exit</li>\\n<li>Interior pockets for organizing small items</li>\\n<li>Reflective guy lines for improved visibility at night</li>\\n<li>Freestanding design for easy setup and relocation</li>\\n<li>Carry bag included for convenient storage and transportation</li>\\n</ul>\\n<h2 id=\\\"technical-specs\\\">Technical Specs</h2>\\n<p><strong>Best Use</strong>: Camping<br />\\n<strong>Capacity</strong>: 4-person<br />\\n<strong>Season Rating</strong>: 3-season<br />\\n<strong>Setup</strong>: Freestanding<br />\\n<strong>Material</strong>: Polyester<br />\\n<strong>Waterproof</strong>: Yes<br />\\n<strong>Floor Area</strong>: 80 square feet<br />\\n<strong>Peak Height</strong>: 6 feet<br />\\n<strong>Number of Doors</strong>: 2<br />\\n<strong>Color</strong>: Green<br />\\n<strong>Rainfly</strong>: Included<br />\\n<strong>Rainfly Waterproof Rating</strong>: 2000mm<br />\\n<strong>Tent Poles</strong>: Aluminum<br />\\n<strong>Pole Diameter</strong>: 9mm<br />\\n<strong>Ventilation</strong>: Mesh panels and adjustable vents<br />\\n<strong>Interior Pockets</strong>: Yes (4 pockets)<br />\\n<strong>Gear Loft</strong>: Included<br />\\n<strong>Footprint</strong>: Sold separately<br />\\n<strong>Guy Lines</strong>: Reflective<br />\\n<strong>Stakes</strong>: Aluminum<br />\\n<strong>Carry Bag</strong>: Included<br />\\n<strong>Dimensions</strong>: 10ft x 8ft x 6ft (length x width x peak height)<br />\\n<strong>Packed Size</strong>: 24 inches x 8 inches<br />\\n<strong>Weight</strong>: 12 lbs </p>\\n<h2 id=\\\"trailmaster-x4-tent-user-guide\\\">TrailMaster X4 Tent User Guide</h2>\\n<h3 id=\\\"introduction\\\">Introduction</h3>\\n<p>Thank you for choosing the TrailMaster X4 Tent. This user guide provides instructions on how to set up, use, and maintain your tent effectively. 
Please read this guide thoroughly before using the tent.</p>\\n<h3 id=\\\"package-contents\\\">Package Contents</h3>\\n<p>Ensure that the package includes the following components:</p>\\n<ul>\\n<li>TrailMaster X4 Tent body</li>\\n<li>Tent poles</li>\\n<li>Rainfly (if applicable)</li>\\n<li>Stakes and guy lines</li>\\n<li>Carry bag</li>\\n<li>User Guide</li>\\n</ul>\\n<p>If any components are missing or damaged, please contact our customer support immediately.</p>\\n<h3 id=\\\"tent-setup\\\">Tent Setup</h3>\\n<h4 id=\\\"step-1-selecting-a-suitable-location\\\">Step 1: Selecting a Suitable Location</h4>\\n<ul>\\n<li>Find a level and clear area for pitching the tent.</li>\\n<li>Remove any sharp objects or debris that could damage the tent floor.</li>\\n</ul>\\n<h4 id=\\\"step-2-unpacking-and-organizing-components\\\">Step 2: Unpacking and Organizing Components</h4>\\n<ul>\\n<li>Lay out all the tent components on the ground.</li>\\n<li>Familiarize yourself with each part, including the tent body, poles, rainfly, stakes, and guy lines.</li>\\n</ul>\\n<h4 id=\\\"step-3-assembling-the-tent-poles\\\">Step 3: Assembling the Tent Poles</h4>\\n<ul>\\n<li>Connect the tent poles according to their designated color codes or numbering.</li>\\n<li>Slide the connected poles through the pole sleeves or attach them to the tent body clips.</li>\\n</ul>\\n<h4 id=\\\"step-4-setting-up-the-tent-body\\\">Step 4: Setting up the Tent Body</h4>\\n<ul>\\n<li>Begin at one end and raise the tent body by pushing up the poles.</li>\\n<li>Ensure that the tent body is evenly stretched and centered.</li>\\n<li>Secure the tent body to the ground using stakes and guy lines as needed.</li>\\n</ul>\\n<h4 id=\\\"step-5-attaching-the-rainfly-if-applicable\\\">Step 5: Attaching the Rainfly (if applicable)</h4>\\n<ul>\\n<li>If your tent includes a rainfly, spread it over the tent body.</li>\\n<li>Attach the rainfly to the tent corners and secure it with the provided buckles or clips.</li>\\n<li>Adjust the tension of the rainfly to ensure proper airflow and weather protection.</li>\\n</ul>\\n<h4 id=\\\"step-6-securing-the-tent\\\">Step 6: Securing the Tent</h4>\\n<ul>\\n<li>Stake down the tent corners and guy out the guy lines for additional stability.</li>\\n<li>Adjust the tension of the guy lines to provide optimal stability and wind resistance.</li>\\n</ul>\\n<h3 id=\\\"tent-takedown-and-storage\\\">Tent Takedown and Storage</h3>\\n<h4 id=\\\"step-1-removing-stakes-and-guy-lines\\\">Step 1: Removing Stakes and Guy Lines</h4>\\n<ul>\\n<li>Remove all stakes from the ground.</li>\\n<li>Untie or disconnect the guy lines from the tent and store them separately.</li>\\n</ul>\",\"id\":null,\"title\":\"Information about product item_number: 1\",\"filepath\":\"product_info_1.md\",\"url\":\"https://amipateldemo.blo\ b.core.windows.net/fileupload-my-product-info/product_info_1.md\",\"metad\ ata\":{\"chunking\":\"orignal document size=1544. 
Scores=3.739763Org Highlight count=75.\"},\"chunk_id\":\"1\"},{\"content\":\"<h1 id=\\\"information-about-product-item_number-8\\\">Information about product item_number: 8</h1>\\n<p>Alpine Explorer Tent, price $350,</p>\\n<h2 id=\\\"brand\\\">Brand</h2>\\n<p>AlpineGear</p>\\n<h2 id=\\\"category\\\">Category</h2>\\n<p>Tents</p>\\n<h3 id=\\\"features\\\">Features</h3>\\n<ul>\\n<li>Waterproof: Provides reliable protection against rain and moisture.</li>\\n<li>Easy Setup: Simple and quick assembly process, making it convenient for camping.</li>\\n<li>Room Divider: Includes a detachable divider to create separate living spaces within the tent.</li>\\n<li>Excellent Ventilation: Multiple mesh windows and vents promote airflow and reduce condensation.</li>\\n<li>Gear Loft: Built-in gear loft or storage pockets for organizing and storing camping gear.</li>\\n</ul>\\n<h2 id=\\\"technical-specs\\\">Technical Specs</h2>\\n<p><strong>Best Use</strong>: Camping<br />\\n<strong>Capacity</strong>: 8-person<br />\\n<strong>Season Rating</strong>: 3-season<br />\\n<strong>Setup</strong>: Freestanding<br />\\n<strong>Material</strong>: Polyester<br />\\n<strong>Waterproof</strong>: Yes<br />\\n<strong>Floor Area</strong>: 120 square feet<br />\\n<strong>Peak Height</strong>: 6.5 feet<br />\\n<strong>Number of Doors</strong>: 2<br />\\n<strong>Color</strong>: Orange<br />\\n<strong>Rainfly</strong>: Included<br />\\n<strong>Rainfly Waterproof Rating</strong>: 3000mm<br />\\n<strong>Tent Poles</strong>: Aluminum<br />\\n<strong>Pole Diameter</strong>: 12mm<br />\\n<strong>Ventilation</strong>: Mesh panels and adjustable vents<br />\\n<strong>Interior Pockets</strong>: 4 pockets<br />\\n<strong>Gear Loft</strong>: Included<br />\\n<strong>Footprint</strong>: Sold separately<br />\\n<strong>Guy Lines</strong>: Reflective<br />\\n<strong>Stakes</strong>: Aluminum<br />\\n<strong>Carry Bag</strong>: Included<br />\\n<strong>Dimensions</strong>: 12ft x 10ft x 7ft (Length x Width x Peak Height)<br />\\n<strong>Packed Size</strong>: 24 inches x 10 inches<br />\\n<strong>Weight</strong>: 17 lbs</p>\\n<h2 id=\\\"alpine-explorer-tent-user-guide\\\">Alpine Explorer Tent User Guide</h2>\\n<p>Thank you for choosing the Alpine Explorer Tent. This user guide provides instructions on how to set up, use, and maintain your tent effectively. 
Please read this guide thoroughly before using the tent.</p>\\n<h3 id=\\\"package-contents\\\">Package Contents</h3>\\n<p>Ensure that the package includes the following components:</p>\\n<ul>\\n<li>Alpine Explorer Tent body</li>\\n<li>Tent poles</li>\\n<li>Rainfly</li>\\n<li>Stakes and guy lines</li>\\n<li>Carry bag</li>\\n<li>User Guide</li>\\n</ul>\\n<p>If any components are missing or damaged, please contact our customer support immediately.</p>\\n<h3 id=\\\"tent-setup\\\">Tent Setup</h3>\\n<p><strong>Step 1: Selecting a Suitable Location</strong></p>\\n<ul>\\n<li>Find a level and clear area for pitching the tent.</li>\\n<li>Remove any sharp objects or debris that could damage the tent floor.</li>\\n</ul>\\n<p><strong>Step 2: Unpacking and Organizing Components</strong></p>\\n<ul>\\n<li>Lay out all the tent components on the ground.</li>\\n<li>Familiarize yourself with each part, including the tent body, poles, rainfly, stakes, and guy lines.</li>\\n</ul>\\n<p><strong>Step 3: Assembling the Tent Poles</strong></p>\\n<ul>\\n<li>Connect the tent poles according to their designated color codes or numbering.</li>\\n<li>Slide the connected poles through the pole sleeves or attach them to the tent body clips.</li>\\n</ul>\\n<p><strong>Step 4: Setting up the Tent Body</strong></p>\\n<ul>\\n<li>Begin at one end and raise the tent body by pushing up the poles.</li>\\n<li>Ensure that the tent body is evenly stretched and centered.</li>\\n<li>Secure the tent body to the ground using stakes and guy lines as needed.</li>\\n</ul>\\n<p><strong>Step 5: Attaching the Rainfly</strong></p>\\n<ul>\\n<li>Spread the rainfly over the tent body.</li>\\n<li>Attach the rainfly to the tent corners and secure it with the provided buckles or clips.</li>\\n<li>Adjust the tension of the rainfly to ensure proper airflow and weather protection.</li>\\n</ul>\\n<p><strong>Step 6: Securing the Tent</strong></p>\\n<ul>\\n<li>Stake down the tent corners and guy out the guy lines for additional stability.</li>\\n<li>Adjust the tension of the guy lines to provide optimal stability and wind resistance.</li>\\n</ul>\\n<h3 id=\\\"tent-takedown-and-storage\\\">Tent Takedown and Storage</h3>\\n<p><strong>Step 1: Removing Stakes and Guy Lines</strong></p>\\n<ul>\\n<li>Remove all stakes from the ground.</li>\\n<li>Untie or disconnect the guy lines from the tent and store them separately.</li>\\n</ul>\\n<p><strong>Step 2: Taking Down the Tent Body</strong></p>\\n<ul>\\n<li>Start by collapsing the tent poles carefully.</li>\\n<li>Remove the poles from the pole sleeves or clips.</li>\\n</ul>\",\"id\":null,\"title\":\"Information about product item_number: 8\",\"filepath\":\"product_info_8.md\",\"url\":\"https://amipateldemo.blo\ b.core.windows.net/fileupload-my-product-info/product_info_8.md\",\"metad\ ata\":{\"chunking\":\"orignal document size=1419. 
Scores=3.8508284Org Highlight count=77.\"},\"chunk_id\":\"1\"},{\"content\":\"<h1 id=\\\"information-about-product-item_number-15\\\">Information about product item_number: 15</h1>\\n<p>SkyView 2-Person Tent, price $200,</p>\\n<h2 id=\\\"brand\\\">Brand</h2>\\n<p>OutdoorLiving</p>\\n<h2 id=\\\"category\\\">Category</h2>\\n<p>Tents</p>\\n<h2 id=\\\"features\\\">Features</h2>\\n<ul>\\n<li>Spacious interior comfortably accommodates two people</li>\\n<li>Durable and waterproof materials for reliable protection against the elements</li>\\n<li>Easy and quick setup with color-coded poles and intuitive design</li>\\n<li>Two large doors for convenient entry and exit</li>\\n<li>Vestibules provide extra storage space for gear</li>\\n<li>Mesh panels for enhanced ventilation and reduced condensation</li>\\n<li>Rainfly included for added weather protection</li>\\n<li>Freestanding design allows for versatile placement</li>\\n<li>Multiple interior pockets for organizing small items</li>\\n<li>Reflective guy lines and stake points for improved visibility at night</li>\\n<li>Compact and lightweight for easy transportation and storage</li>\\n<li>Double-stitched seams for increased durability</li>\\n<li>Comes with a carrying bag for convenient portability</li>\\n</ul>\\n<h2 id=\\\"technical-specs\\\">Technical Specs</h2>\\n<ul>\\n<li><strong>Best Use</strong>: Camping, Hiking</li>\\n<li><strong>Capacity</strong>: 2-person</li>\\n<li><strong>Seasons</strong>: 3-season</li>\\n<li><strong>Packed Weight</strong>: Approx. 8 lbs</li>\\n<li><strong>Number of Doors</strong>: 2</li>\\n<li><strong>Number of Vestibules</strong>: 2</li>\\n<li><strong>Vestibule Area</strong>: Approx. 8 square feet per vestibule</li>\\n<li><strong>Rainfly</strong>: Included</li>\\n<li><strong>Pole Material</strong>: Lightweight aluminum</li>\\n<li><strong>Freestanding</strong>: Yes</li>\\n<li><strong>Footprint Included</strong>: No</li>\\n<li><strong>Tent Bag Dimensions</strong>: 7ft x 5ft x 4ft</li>\\n<li><strong>Packed Size</strong>: Compact</li>\\n<li><strong>Color:</strong> Blue</li>\\n<li><strong>Warranty</strong>: Manufacturer's warranty included</li>\\n</ul>\\n<h2 id=\\\"user-guidemanual\\\">User Guide/Manual</h2>\\n<ol>\\n<li>Tent Components</li>\\n</ol>\\n<p>The SkyView 2-Person Tent includes the following components:\\n- Tent body\\n- Rainfly\\n- Aluminum tent poles\\n- Tent stakes\\n- Guy lines\\n- Tent bag</p>\\n<ol start=\\\"2\\\">\\n<li>Tent Setup</li>\\n</ol>\\n<p>Follow these steps to set up your SkyView 2-Person Tent:</p>\\n<p>Step 1: Find a suitable camping site with a level ground and clear of debris.\\nStep 2: Lay out the tent body on the ground, aligning the doors and vestibules as desired.\\nStep 3: Assemble the tent poles and insert them into the corresponding pole sleeves or grommets on the tent body.\\nStep 4: Attach the rainfly over the tent body, ensuring a secure fit.\\nStep 5: Stake down the tent and rainfly using the provided tent stakes, ensuring a taut pitch.\\nStep 6: Adjust the guy lines as needed to enhance stability and ventilation.\\nStep 7: Once the tent is properly set up, organize your gear inside and enjoy your camping experience.</p>\\n<ol start=\\\"3\\\">\\n<li>Tent Takedown</li>\\n</ol>\\n<p>To dismantle and pack up your SkyView 2-Person Tent, follow these steps:</p>\\n<p>Step 1: Remove all gear and belongings from the tent.\\nStep 2: Remove the stakes and guy lines from the ground.\\nStep 3: Detach the rainfly from the tent body.\\nStep 4: Disassemble the tent poles and remove them from the tent 
body.\\nStep 5: Fold and roll up the tent body, rainfly, and poles separately.\\nStep 6: Place all components back into the tent bag, ensuring a compact and organized packing.</p>\\n<ol start=\\\"4\\\">\\n<li>Tent Care and Maintenance</li>\\n</ol>\\n<p>To extend the lifespan of your SkyView 2-Person Tent, follow these care and maintenance guidelines:</p>\\n<ul>\\n<li>Always clean and dry the tent before storing it.</li>\\n<li>Avoid folding or storing the tent when it is wet or damp to prevent mold or mildew growth.</li>\\n<li>Use a mild soap and water solution to clean the tent if necessary, and avoid using harsh chemicals or solvents.</li>\\n<li>Inspect the tent regularly for any damages such as tears, punctures, or broken components. Repair or replace as needed.</li>\\n<li>Store the tent in a cool, dry place away from direct sunlight and extreme temperatures.</li>\\n<li>Avoid placing sharp objects or excessive weight on the tent, as this may cause damage.</li>\\n<li>Follow the manufacturer's recommendations for seam sealing or re-waterproofing the tent if necessary.</li>\\n</ul>\\n<ol start=\\\"5\\\">\\n<li>Safety Precautions</li>\\n</ol>\\n<ul>\\n<li>Always choose a safe and suitable camping location, considering factors such as terrain, weather conditions, and potential hazards.</li>\\n</ul>\",\"id\":null,\"title\":\"Information about product item_number: 15\",\"filepath\":\"product_info_15.md\",\"url\":\"https://amipateldemo.b\ lob.core.windows.net/fileupload-my-product-info/product_info_15.md\",\"me\ tadata\":{\"chunking\":\"orignal document size=1342. Scores=3.4607773Org Highlight count=70.\"},\"chunk_id\":\"1\"},{\"content\":\"<ul>\\n<li><strong>If Membership status \\\"None \\\":</strong> Returns are accepted within 30 days of purchase, provided the tent is unused, undamaged and in its original packaging. Customer is responsible for the cost of return shipping. Once the returned item is received, a refund will be issued for the cost of the item minus a 10% restocking fee. If the item was damaged during shipping or if there is a defect, the customer should contact customer service within 7 days of receiving the item.</li>\\n<li><strong>If Membership status \\\"Gold\\\":</strong> Returns are accepted within 60 days of purchase, provided the tent is unused, undamaged and in its original packaging. Free return shipping is provided. Once the returned item is received, a full refund will be issued. If the item was damaged during shipping or if there is a defect, the customer should contact customer service within 7 days of receiving the item.</li>\\n<li><strong>If Membership status \\\"Platinum\\\":</strong> Returns are accepted within 90 days of purchase, provided the tent is unused, undamaged and in its original packaging. Free return shipping is provided, and a full refund will be issued. If the item was damaged during shipping or if there is a defect, the customer should contact customer service within 7 days of receiving the item.</li>\\n</ul>\\n<h2 id=\\\"reviews\\\">Reviews</h2>\\n<p>36) <strong>Rating:</strong> 5\\n <strong>Review:</strong> The Alpine Explorer Tent is amazing! It's easy to set up, has excellent ventilation, and the room divider is a great feature for added privacy. Highly recommend it for family camping trips!</p>\\n<p>37) <strong>Rating:</strong> 4\\n <strong>Review:</strong> I bought the Alpine Explorer Tent, and while it's waterproof and spacious, I wish it had more storage pockets. 
Overall, it's a good tent for camping.</p>\\n<p>38) <strong>Rating:</strong> 5\\n <strong>Review:</strong> The Alpine Explorer Tent is perfect for my family's camping adventures. It's easy to set up, has great ventilation, and the gear loft is an excellent addition. Love it!</p>\\n<p>39) <strong>Rating:</strong> 4\\n <strong>Review:</strong> I like the Alpine Explorer Tent, but I wish it came with a footprint. It's comfortable and has many useful features, but a footprint would make it even better. Overall, it's a great tent.</p>\\n<p>40) <strong>Rating:</strong> 5\\n <strong>Review:</strong> This tent is perfect for our family camping trips. It's spacious, easy to set up, and the room divider is a great feature for added privacy. The gear loft is a nice bonus for extra storage.</p>\\n<h2 id=\\\"faq\\\">FAQ</h2>\\n<p>34) How easy is it to set up the Alpine Explorer Tent?\\n The Alpine Explorer Tent features a quick and easy setup, thanks to color-coded poles and intuitive design. Most users can set it up in just a few minutes.</p>\\n<p>35) Can the Alpine Explorer Tent accommodate two queen-sized air mattresses?\\n Yes, the Alpine Explorer Tent is spacious enough to accommodate two queen-sized air mattresses, making it an ideal choice for comfortable family camping.</p>\\n<p>36) What is the purpose of the room divider in the Alpine Explorer Tent?\\n The room divider in the Alpine Explorer Tent allows you to create separate sleeping and living spaces, providing privacy and organization for your camping experience.</p>\\n<p>37) How does the gear loft in the Alpine Explorer Tent work?\\n The gear loft in the Alpine Explorer Tent is a suspended mesh shelf that provides additional storage space for small items, keeping them organized and easily accessible.</p>\\n<p>38) Can the Alpine Explorer Tent be used in snowy conditions?\\n The Alpine Explorer Tent is designed primarily for three-season use. While it can withstand light snowfall, it may not provide adequate structural support and insulation during heavy snow or extreme winter conditions.</p>\",\"id\":null,\"title\":\"Information about product item_number: 8\",\"filepath\":\"product_info_8.md\",\"url\":\"https://amipateldemo.blo\ b.core.windows.net/fileupload-my-product-info/product_info_8.md\",\"metad\ ata\":{\"chunking\":\"orignal document size=906. Scores=5.568323Org Highlight count=85.\"},\"chunk_id\":\"0\"},{\"content\":\"<p>If you have any questions or need further assistance, please contact our customer support:</p>\\n<ul>\\n<li>Customer Support Phone: +1-800-123-4567</li>\\n<li>Customer Support Email: [email protected]</li>\\n</ul>\\n<h2 id=\\\"return-policy\\\">Return Policy</h2>\\n<ul>\\n<li><strong>If Membership status \\\"None \\\":</strong> Returns are accepted within 30 days of purchase, provided the tent is unused, undamaged and in its original packaging. Customer is responsible for the cost of return shipping. Once the returned item is received, a refund will be issued for the cost of the item minus a 10% restocking fee. If the item was damaged during shipping or if there is a defect, the customer should contact customer service within 7 days of receiving the item.</li>\\n<li><strong>If Membership status \\\"Gold\\\":</strong> Returns are accepted within 60 days of purchase, provided the tent is unused, undamaged and in its original packaging. Free return shipping is provided. Once the returned item is received, a full refund will be issued. 
If the item was damaged during shipping or if there is a defect, the customer should contact customer service within 7 days of receiving the item.</li>\\n<li><strong>If Membership status \\\"Platinum\\\":</strong> Returns are accepted within 90 days of purchase, provided the tent is unused, undamaged and in its original packaging. Free return shipping is provided, and a full refund will be issued. If the item was damaged during shipping or if there is a defect, the customer should contact customer service within 7 days of receiving the item.</li>\\n</ul>\\n<h2 id=\\\"reviews\\\">Reviews</h2>\\n<p>1) <strong>Rating:</strong> 5\\n <strong>Review:</strong> I am extremely happy with my TrailMaster X4 Tent! It's spacious, easy to set up, and kept me dry during a storm. The UV protection is a great addition too. Highly recommend it to anyone who loves camping!</p>\\n<p>2) <strong>Rating:</strong> 3\\n <strong>Review:</strong> I bought the TrailMaster X4 Tent, and while it's waterproof and has a spacious interior, I found it a bit difficult to set up. It's a decent tent, but I wish it were easier to assemble.</p>\\n<p>3) <strong>Rating:</strong> 5\\n <strong>Review:</strong> The TrailMaster X4 Tent is a fantastic investment for any serious camper. The easy setup and spacious interior make it perfect for extended trips, and the waterproof design kept us dry in heavy rain.</p>\\n<p>4) <strong>Rating:</strong> 4\\n <strong>Review:</strong> I like the TrailMaster X4 Tent, but I wish it came in more colors. It's comfortable and has many useful features, but the green color just isn't my favorite. Overall, it's a good tent.</p>\\n<p>5) <strong>Rating:</strong> 5\\n <strong>Review:</strong> This tent is perfect for my family camping trips. The spacious interior and convenient storage pocket make it easy to stay organized. It's also super easy to set up, making it a great addition to our gear.</p>\\n<h2 id=\\\"faq\\\">FAQ</h2>\\n<p>1) Can the TrailMaster X4 Tent be used in winter conditions?\\n The TrailMaster X4 Tent is designed for 3-season use and may not be suitable for extreme winter conditions with heavy snow and freezing temperatures.</p>\\n<p>2) How many people can comfortably sleep in the TrailMaster X4 Tent?\\n The TrailMaster X4 Tent can comfortably accommodate up to 4 people with room for their gear.</p>\\n<p>3) Is there a warranty on the TrailMaster X4 Tent?\\n Yes, the TrailMaster X4 Tent comes with a 2-year limited warranty against manufacturing defects.</p>\\n<p>4) Are there any additional accessories included with the TrailMaster X4 Tent?\\n The TrailMaster X4 Tent includes a rainfly, tent stakes, guy lines, and a carry bag for easy transport.</p>\\n<p>5) Can the TrailMaster X4 Tent be easily carried during hikes?\\n Yes, the TrailMaster X4 Tent weighs just 12lbs, and when packed in its carry bag, it can be comfortably carried during hikes.</p>\",\"id\":null,\"title\":\"Information about product item_number: 1\",\"filepath\":\"product_info_1.md\",\"url\":\"https://amipateldemo.blo\ b.core.windows.net/fileupload-my-product-info/product_info_1.md\",\"metad\ ata\":{\"chunking\":\"orignal document size=981. 
Scores=4.0350547Org Highlight count=74.\"},\"chunk_id\":\"0\"}]}"
    is_chat_input: false
outputs:
  gpt_relevance:
    type: string
    reference: ${concat_scores.output.gpt_relevance}
  gpt_groundedness:
    type: string
    reference: ${concat_scores.output.gpt_groundedness}
  gpt_retrieval_score:
    type: string
    reference: ${concat_scores.output.gpt_retrieval_score}
nodes:
- name: concat_scores
  type: python
  source:
    type: code
    path: concat_scores.py
  inputs:
    rag_generation_score: ${parse_generation_score.output}
    rag_grounding_score: ${parse_grounding_score.output}
    rag_retrieval_score: ${parse_retrieval_score.output}
  use_variants: false
- name: aggregate_variants_results
  type: python
  source:
    type: code
    path: aggregate_variants_results.py
  inputs:
    metrics: ${inputs.metrics}
    results: ${concat_scores.output}
  aggregation: true
  use_variants: false
- name: gpt_groundedness
  type: llm
  source:
    type: code
    path: rag_groundedness_prompt.jinja2
  inputs:
    deployment_name: gpt-4
    temperature: 0
    top_p: 1
    stop: ""
    max_tokens: 1000
    presence_penalty: 0
    frequency_penalty: 0
    logit_bias: ""
    FullBody: ${inputs.documents}
    answer: ${inputs.answer}
    question: ${inputs.question}
  provider: AzureOpenAI
  connection: open_ai_connection
  api: chat
  module: promptflow.tools.aoai
  activate:
    when: ${validate_input.output.gpt_groundedness}
    is: true
  use_variants: false
- name: gpt_retrieval_score
  type: llm
  source:
    type: code
    path: rag_retrieval_prompt.jinja2
  inputs:
    deployment_name: gpt-4
    temperature: 0
    top_p: 1
    stop: ""
    max_tokens: 1000
    presence_penalty: 0
    frequency_penalty: 0
    logit_bias: ""
    FullBody: ${inputs.documents}
    question: ${inputs.question}
  provider: AzureOpenAI
  connection: open_ai_connection
  api: chat
  module: promptflow.tools.aoai
  activate:
    when: ${validate_input.output.gpt_retrieval_score}
    is: true
  use_variants: false
- name: gpt_relevance
  type: llm
  source:
    type: code
    path: rag_generation_prompt.jinja2
  inputs:
    deployment_name: gpt-4
    temperature: 0
    top_p: 1
    stop: ""
    max_tokens: 1000
    presence_penalty: 0
    frequency_penalty: 0
    logit_bias: ""
    FullBody: ${inputs.documents}
    answer: ${inputs.answer}
    question: ${inputs.question}
  provider: AzureOpenAI
  connection: open_ai_connection
  api: chat
  module: promptflow.tools.aoai
  activate:
    when: ${validate_input.output.gpt_relevance}
    is: true
  use_variants: false
- name: parse_generation_score
  type: python
  source:
    type: code
    path: parse_generation_score.py
  inputs:
    rag_generation_score: ${gpt_relevance.output}
  use_variants: false
- name: parse_retrieval_score
  type: python
  source:
    type: code
    path: parse_retrival_score.py
  inputs:
    retrieval_output: ${gpt_retrieval_score.output}
  use_variants: false
- name: parse_grounding_score
  type: python
  source:
    type: code
    path: parse_groundedness_score.py
  inputs:
    rag_grounding_score: ${gpt_groundedness.output}
  use_variants: false
- name: select_metrics
  type: python
  source:
    type: code
    path: select_metrics.py
  inputs:
    metrics: ${inputs.metrics}
  use_variants: false
- name: validate_input
  type: python
  source:
    type: code
    path: validate_input.py
  inputs:
    answer: ${inputs.answer}
    documents: ${inputs.documents}
    question: ${inputs.question}
    selected_metrics: ${select_metrics.output}
  use_variants: false
node_variants: {}
environment:
  python_requirements_txt: requirements.txt
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/parse_generation_score.py
from promptflow import tool
import re


@tool
def parse_generation_output(rag_generation_score: str) -> dict:
    # Extract the numeric quality score and the reasoning sentence from the
    # LLM completion produced by the gpt_relevance node.
    quality_score = float('nan')
    quality_reasoning = ''
    for sent in rag_generation_score.split('\n'):
        sent = sent.strip()
        if re.match(r"\s*(<)?Quality score:", sent):
            numbers_found = re.findall(r"(\d+\.*\d*)\/", sent)
            if len(numbers_found) == 0:
                continue
            quality_score = int(
                float(numbers_found[0].replace("'", "")))

    for sent in rag_generation_score.split('\n'):
        sent = sent.strip()
        if re.match(r"\s*(<)?Quality score reasoning:", sent):
            quality_reasoning += sent.strip()
            break
    return {"quality_score": quality_score, "quality_reasoning": quality_reasoning}
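A minimal usage sketch for parse_generation_score.py, added here for illustration and not part of the original file. It assumes promptflow is installed and the snippet runs from the eval-qna-rag-metrics folder so the local import resolves; the sample completion string is made up to mimic the prompt's expected output format.

```python
# Hypothetical usage sketch: parse a sample relevance completion.
from parse_generation_score import parse_generation_output

sample_completion = (
    "<start quality score reasoning>\n"
    "Quality score reasoning: The answer covers every fact in the reference answer.\n"
    "<end quality score reasoning>\n"
    "<start quality score>\n"
    "Quality score: 4/5\n"
    "<end quality score>\n"
)

print(parse_generation_output(sample_completion))
# roughly: {'quality_score': 4, 'quality_reasoning': 'Quality score reasoning: The answer covers every fact in the reference answer.'}
```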
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/parse_groundedness_score.py
from promptflow import tool
import re


@tool
def parse_grounding_output(rag_grounding_score: str) -> dict:
    # Extract the numeric groundedness score and keep the text before the score
    # line as the reasoning.
    try:
        numbers_found = re.findall(r"Quality score:\s*(\d+)\/\d", rag_grounding_score)
        score = float(numbers_found[0]) if len(numbers_found) > 0 else 0
    except Exception:
        score = float("nan")
    try:
        quality_reasoning, _ = rag_grounding_score.split("Quality score: ")
    except Exception:
        quality_reasoning = rag_grounding_score
    return {"quality_score": score, "quality_reasoning": quality_reasoning}
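A short usage sketch for parse_groundedness_score.py, for illustration only. It assumes promptflow is installed and the snippet runs from this flow folder; the sample completion is invented.

```python
# Hypothetical usage sketch: parse a sample groundedness completion.
from parse_groundedness_score import parse_grounding_output

sample_completion = "The answer is fully supported by the context. Quality score: 5/5"
print(parse_grounding_output(sample_completion))
# roughly: {'quality_score': 5.0, 'quality_reasoning': 'The answer is fully supported by the context. '}
```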
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/concat_scores.py
from promptflow import tool
import numpy as np


@tool
def concat_results(rag_retrieval_score: dict = None, rag_grounding_score: dict = None,
                   rag_generation_score: dict = None):
    load_list = [{'name': 'gpt_groundedness', 'result': rag_grounding_score},
                 {'name': 'gpt_retrieval_score', 'result': rag_retrieval_score},
                 {'name': 'gpt_relevance', 'result': rag_generation_score}]
    score_list = []
    errors = []
    for item in load_list:
        if item['result']:
            try:
                score = float(item['result']["quality_score"])
            except Exception as e:
                score = np.nan
                errors.append({"name": item["name"], "msg": str(e), "data": item['result']})
            reasoning = item['result']['quality_reasoning']
        else:
            score = np.nan
            reasoning = None
        score_list.append({"name": item["name"], "score": score, "quality_reasoning": reasoning})

    variant_level_result = {}
    for item in score_list:
        item_name = str(item["name"])
        variant_level_result[item_name] = item["score"]

    return variant_level_result
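For illustration, a minimal sketch of how the concat_scores node merges the parsed per-metric results into one flat dict; not part of the original file. It assumes promptflow and numpy are installed and the snippet runs from this flow folder; the sample inputs are invented.

```python
# Hypothetical usage sketch: a skipped metric (None) becomes NaN in the output.
from concat_scores import concat_results

merged = concat_results(
    rag_retrieval_score={"quality_score": 4.0, "quality_reasoning": "..."},
    rag_grounding_score={"quality_score": 5.0, "quality_reasoning": "..."},
    rag_generation_score=None,  # metric not selected for this line
)
print(merged)
# roughly: {'gpt_groundedness': 5.0, 'gpt_retrieval_score': 4.0, 'gpt_relevance': nan}
```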
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/select_metrics.py
from promptflow import tool


@tool
def select_metrics(metrics: str) -> dict:
    # Turn the comma-separated metrics input into per-metric boolean flags.
    supported_metrics = ('gpt_relevance', 'gpt_groundedness', 'gpt_retrieval_score')
    user_selected_metrics = [metric.strip() for metric in metrics.split(',') if metric]
    metric_selection_dict = {}
    for metric in supported_metrics:
        if metric in user_selected_metrics:
            metric_selection_dict[metric] = True
        else:
            metric_selection_dict[metric] = False
    return metric_selection_dict
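A tiny usage sketch for select_metrics.py, for illustration only; it assumes the snippet runs from this flow folder with promptflow installed, and the metrics string is an example value.

```python
# Hypothetical usage sketch: comma-separated metrics become on/off flags.
from select_metrics import select_metrics

print(select_metrics("gpt_groundedness, gpt_retrieval_score"))
# expected: {'gpt_relevance': False, 'gpt_groundedness': True, 'gpt_retrieval_score': True}
```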
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-qna-rag-metrics/rag_generation_prompt.jinja2
system:
You will be provided a question, a conversation history, fetched documents related to the question and a response to the question in the domain. Your task is to evaluate the quality of the provided response by following the steps below:
- Understand the context of the question based on the conversation history.
- Generate a reference answer that is only based on the conversation history, question, and fetched documents. Don't generate the reference answer based on your own knowledge.
- You need to rate the provided response according to the reference answer if it's available on a scale of 1 (poor) to 5 (excellent), based on the below criteria:
  - 5 - Ideal: The provided response includes all information necessary to answer the question based on the reference answer and conversation history. Please be strict about giving a 5 score.
  - 4 - Mostly Relevant: The provided response is mostly relevant, although it may be a little too narrow or too broad based on the reference answer and conversation history.
  - 3 - Somewhat Relevant: The provided response may be partly helpful but might be hard to read or contain other irrelevant content based on the reference answer and conversation history.
  - 2 - Barely Relevant: The provided response is barely relevant, perhaps shown as a last resort based on the reference answer and conversation history.
  - 1 - Completely Irrelevant: The provided response should never be used for answering this question based on the reference answer and conversation history.
- You need to rate the provided response to be 5, if the reference answer can not be generated since no relevant documents were retrieved.
- You need to first provide a scoring reason for the evaluation according to the above criteria, and then provide a score for the quality of the provided response.
- You need to translate the provided response into English if it's in another language.
- Your final response must include both the reference answer and the evaluation result. The evaluation result should be written in English.
Your response should be in the following format:
```
[assistant](#evaluation result)
<start reference answer>
[insert the reference answer here]
<end reference answer>
<start quality score reasoning>
Quality score reasoning: [insert score reasoning here]
<end quality score reasoning>
<start quality score>
Quality score: [insert score here]/5
<end quality score>
```
- Your answer must end with <|im_end|>.

user:
#conversation history

#question
{{question}}

#fetched documents
{{FullBody}}

#provided response
{{answer}}

assistant:
#evaluation result
<start reference answer>"""
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/data.jsonl
{"entities": ["software engineer","CEO"],"ground_truth": "\"CEO, Software Engineer, Finance Manager\""} {"entities": ["Software Engineer","CEO", "Finance Manager"],"ground_truth": "\"CEO, Software Engineer, Finance Manager\""}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/match.py
from promptflow import tool
from typing import List


@tool
def match(answer: List[str], ground_truth: List[str]):
    exact_match = 0
    partial_match = 0

    if is_match(answer, ground_truth, ignore_case=True, ignore_order=True, allow_partial=False):
        exact_match = 1

    if is_match(answer, ground_truth, ignore_case=True, ignore_order=True, allow_partial=True):
        partial_match = 1

    return {"exact_match": exact_match, "partial_match": partial_match, "answer": answer,
            "ground_truth": ground_truth}


def is_match(
        answer: List[str],
        ground_truth: List[str],
        ignore_case: bool,
        ignore_order: bool,
        allow_partial: bool) -> bool:
    if ignore_case:
        answer = [a.lower() for a in answer]
        ground_truth = [g.lower() for g in ground_truth]

    if ignore_order:
        answer.sort()
        ground_truth.sort()

    if allow_partial:
        x = [a for a in answer if a in ground_truth]
        return x == answer

    return answer == ground_truth
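A brief usage sketch for match.py, added for illustration and not part of the original file. It assumes the snippet runs from the eval-entity-match-rate folder with promptflow installed; the entity lists are example data taken from the shape of data.jsonl.

```python
# Hypothetical usage sketch: a subset of the ground truth counts as a partial
# match but not an exact match.
from match import match

result = match(
    answer=["software engineer", "CEO"],
    ground_truth=["CEO", "Software Engineer", "Finance Manager"],
)
print(result["exact_match"], result["partial_match"])  # expected: 0 1
```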
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/is_match_test.py
import unittest

from match import is_match


class IsMatchTest(unittest.TestCase):
    def test_normal(self):
        self.assertEqual(is_match(["a", "b"], ["B", "a"], True, True, False), True)
        self.assertEqual(is_match(["a", "b"], ["B", "a"], True, False, False), False)
        self.assertEqual(is_match(["a", "b"], ["B", "a"], False, True, False), False)
        self.assertEqual(is_match(["a", "b"], ["B", "a"], False, False, True), False)
        self.assertEqual(is_match(["a", "b"], ["a", "b"], False, False, False), True)
        self.assertEqual(is_match(["a", "b"], ["a", "b", "c"], True, False, True), True)
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/README.md
# Entity match rate evaluation

This is a flow that evaluates the entity match rate.

Tools used in this flow:
- `python` tool

## Prerequisites

Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```

### 1. Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
```

### 2. Create flow run with multi-line data

```bash
pf run create --flow . --data ./data.jsonl --column-mapping ground_truth='${data.ground_truth}' entities='${data.entities}' --stream
```

You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI.
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  entities:
    type: list
    default:
    - software engineer
    - CEO
  ground_truth:
    type: string
    default: '"CEO, Software Engineer, Finance Manager"'
outputs:
  match_cnt:
    type: object
    reference: ${match.output}
nodes:
- name: cleansing
  type: python
  source:
    type: code
    path: cleansing.py
  inputs:
    entities_str: ${inputs.ground_truth}
- name: match
  type: python
  source:
    type: code
    path: match.py
  inputs:
    answer: ${inputs.entities}
    ground_truth: ${cleansing.output}
- name: log_metrics
  type: python
  source:
    type: code
    path: log_metrics.py
  inputs:
    match_counts: ${match.output}
  aggregation: true
environment:
  python_requirements_txt: requirements.txt
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/log_metrics.py
from typing import List

from promptflow import log_metric, tool


# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need
@tool
def log_metrics(match_counts: List[dict]):
    exact_match_rate = sum([m["exact_match"] for m in match_counts]) / len(match_counts)
    partial_match_rate = sum([m["partial_match"] for m in match_counts]) / len(match_counts)
    log_metric(key="exact_match_rate", value=exact_match_rate)
    log_metric(key="partial_match_rate", value=partial_match_rate)
    print("exact_match_rate: ", exact_match_rate)
    print("partial_match_rate: ", partial_match_rate)
    return {"exact_match_rate": exact_match_rate, "partial_match_rate": partial_match_rate}
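A worked example for log_metrics.py, added for illustration only. It assumes promptflow is installed and the snippet runs from this flow folder; outside a flow run, `log_metric` may only emit a warning, but the returned rates still show the arithmetic: exact_match_rate = 1/3 (about 0.33) and partial_match_rate = 2/3 (about 0.67) for the invented sample below.

```python
# Hypothetical usage sketch: aggregate three example line-level match results.
from log_metrics import log_metrics

line_results = [
    {"exact_match": 1, "partial_match": 1},
    {"exact_match": 0, "partial_match": 1},
    {"exact_match": 0, "partial_match": 0},
]
print(log_metrics(line_results))
```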
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-entity-match-rate/cleansing.py
from typing import List

from promptflow import tool


@tool
def cleansing(entities_str: str) -> List[str]:
    # Split, remove leading and trailing spaces/tabs/dots
    parts = entities_str.split(",")
    cleaned_parts = [part.strip(" \t.\"") for part in parts]
    entities = [part for part in cleaned_parts if len(part) > 0]
    return entities
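A small usage sketch for cleansing.py, for illustration only; it assumes the snippet runs from this flow folder, and the input string mirrors the quoted ground-truth format in data.jsonl.

```python
# Hypothetical usage sketch: turn the quoted ground-truth string into a clean entity list.
from cleansing import cleansing

print(cleansing('"CEO, Software Engineer, Finance Manager"'))
# expected: ['CEO', 'Software Engineer', 'Finance Manager']
```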
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/data.jsonl
{"groundtruth": "10","prediction": "10"} {"groundtruth": "253","prediction": "506"} {"groundtruth": "1/3","prediction": "2/6"}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/line_process.py
from promptflow import tool


def string_to_number(raw_string: str) -> float:
    ''' Try to parse the prediction string and groundtruth string to float number.
    Support parse int, float, fraction and recognize non-numeric string with wrong format.
    Wrong format cases: 'the answer is \box{2/3}', '0, 5, or any number greater than 11', '4/7//9'
    '''
    float_number = 0.0
    try:
        float_number = float(raw_string)
    except Exception:
        if '/' in raw_string:
            split_list = raw_string.split('/')
            if len(split_list) == 2:
                numerator, denominator = split_list
                try:
                    float_number = float(numerator) / float(denominator)
                except Exception:
                    return None
            else:
                return None
        else:
            return None
    return float_number


@tool
def line_process(groundtruth: str, prediction: str) -> int:
    pred_float = string_to_number(prediction)
    # Early stop
    if (pred_float is None):
        return -1
    gt_float = string_to_number(groundtruth)
    if (gt_float is None):
        return -1
    # Both pred_float and gt_float are valid
    if round(pred_float, 10) == round(gt_float, 10):
        return 1
    else:
        return -1


if __name__ == "__main__":
    processed_result = line_process("3/5", "6/10")
    print("The processed result is", processed_result)

    processed_result = line_process("1/2", "0.5")
    print("The processed result is", processed_result)

    processed_result = line_process("3", "5")
    print("The processed result is", processed_result)

    processed_result = line_process("2/3", "the answer is \box{2/3}")
    print("The processed result is", processed_result)
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/aggregate.py
from typing import List

from promptflow import log_metric, tool


@tool
def accuracy_aggregate(processed_results: List[int]):
    num_exception = 0
    num_correct = 0

    for i in range(len(processed_results)):
        if processed_results[i] == -1:
            num_exception += 1
        elif processed_results[i] == 1:
            num_correct += 1

    num_total = len(processed_results)
    accuracy = round(1.0 * num_correct / num_total, 2)
    error_rate = round(1.0 * num_exception / num_total, 2)

    log_metric(key="accuracy", value=accuracy)
    log_metric(key="error_rate", value=error_rate)

    return {
        "num_total": num_total,
        "num_correct": num_correct,
        "num_exception": num_exception,
        "accuracy": accuracy,
        "error_rate": error_rate
    }


if __name__ == "__main__":
    numbers = [1, 1, 1, 1, 0, -1, -1]
    accuracy = accuracy_aggregate(numbers)
    print("The accuracy is", accuracy)
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/README.md
# Eval chat math

This example shows how to evaluate the answers to math questions by comparing the output results with the standard answers numerically.

Learn more in the corresponding [tutorial](../../../tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md)

Tools used in this flow:
- `python` tool

## Prerequisites

Install promptflow sdk and other dependencies in this folder:
```bash
pip install -r requirements.txt
```

### 1. Test flow with single-line data

Testing flow/node:

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .

# test with flow inputs
pf flow test --flow . --inputs groundtruth=123 prediction=123

# test node with inputs
pf flow test --flow . --node line_process --inputs groundtruth=123 prediction=123
```

### 2. Create flow run with multi-line data

```bash
pf run create --flow . --data ./data.jsonl --stream
```
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-chat-math/flow.dag.yaml
inputs:
  groundtruth:
    type: string
    default: "10"
    is_chat_input: false
  prediction:
    type: string
    default: "10"
    is_chat_input: false
outputs:
  score:
    type: string
    reference: ${line_process.output}
nodes:
- name: line_process
  type: python
  source:
    type: code
    path: line_process.py
  inputs:
    groundtruth: ${inputs.groundtruth}
    prediction: ${inputs.prediction}
  use_variants: false
- name: aggregate
  type: python
  source:
    type: code
    path: aggregate.py
  inputs:
    processed_results: ${line_process.output}
  aggregation: true
  use_variants: false
node_variants: {}
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/data.jsonl
{"groundtruth": "App","prediction": "App"} {"groundtruth": "Channel","prediction": "Channel"} {"groundtruth": "Academic","prediction": "Academic"}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/calculate_accuracy.py
from typing import List

from promptflow import log_metric, tool


@tool
def calculate_accuracy(grades: List[str]):
    result = []
    for index in range(len(grades)):
        grade = grades[index]
        result.append(grade)

    # calculate accuracy for each variant
    accuracy = round((result.count("Correct") / len(result)), 2)
    log_metric("accuracy", accuracy)

    return result
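A worked example for calculate_accuracy.py, added for illustration only. It assumes promptflow is installed and the snippet runs from this flow folder; outside a flow run, `log_metric` may only warn. With the invented grades below, accuracy = 2/3, rounded to 0.67.

```python
# Hypothetical usage sketch: aggregate three line-level grades into an accuracy metric.
from calculate_accuracy import calculate_accuracy

grades = ["Correct", "Incorrect", "Correct"]
print(calculate_accuracy(grades))  # returns the grades list; accuracy 0.67 is logged
```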
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/grade.py
from promptflow import tool


@tool
def grade(groundtruth: str, prediction: str):
    return "Correct" if groundtruth.lower() == prediction.lower() else "Incorrect"
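A tiny usage sketch for grade.py, for illustration only; it assumes the snippet runs from this flow folder, and the labels are example values.

```python
# Hypothetical usage sketch: case-insensitive comparison of prediction vs. groundtruth.
from grade import grade

print(grade(groundtruth="APP", prediction="app"))      # expected: Correct
print(grade(groundtruth="APP", prediction="Channel"))  # expected: Incorrect
```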
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/README.md
# Classification Accuracy Evaluation

This is a flow illustrating how to evaluate the performance of a classification system. It compares each prediction to the groundtruth, assigns a "Correct" or "Incorrect" grade, and aggregates the results to produce metrics such as accuracy, which reflects how good the system is at classifying the data.

Tools used in this flow:
- `python` tool

## What you will learn

In this flow, you will learn
- how to compose a point-based evaluation flow, where you can calculate point-wise metrics.
- the way to log metrics: use `from promptflow import log_metric`
    - see file [calculate_accuracy.py](calculate_accuracy.py)

### 0. Setup connection

Prepare your Azure Open AI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.

```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>
```

### 1. Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .

# test with flow inputs
pf flow test --flow . --inputs groundtruth=APP prediction=APP

# test node with inputs
pf flow test --flow . --node grade --inputs groundtruth=groundtruth prediction=prediction
```

### 2. Create flow run with multi-line data

There are two ways to evaluate a classification flow.

```bash
pf run create --flow . --data ./data.jsonl --column-mapping groundtruth='${data.groundtruth}' prediction='${data.prediction}' --stream
```

You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI.

### 3. Create run against other flow run

Learn more in [web-classification](../../standard/web-classification/README.md)
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-classification-accuracy/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  groundtruth:
    type: string
    description: Please specify the groundtruth column, which contains the true label to the outputs that your flow produces.
    default: APP
  prediction:
    type: string
    description: Please specify the prediction column, which contains the predicted outputs that your flow produces.
    default: APP
outputs:
  grade:
    type: string
    reference: ${grade.output}
nodes:
- name: grade
  type: python
  source:
    type: code
    path: grade.py
  inputs:
    groundtruth: ${inputs.groundtruth}
    prediction: ${inputs.prediction}
- name: calculate_accuracy
  type: python
  source:
    type: code
    path: calculate_accuracy.py
  inputs:
    grades: ${grade.output}
  aggregation: true
environment:
  python_requirements_txt: requirements.txt
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/data.jsonl
{"question": "What is the name of the new language representation model introduced in the document?", "variant_id": "v1", "line_number":1, "answer":"The document mentions multiple language representation models, so it is unclear which one is being referred to as \"new\". Can you provide more specific information or context?","context":["statistical language modeling. arXiv preprint arXiv:1312.3005 . Z. Chen, H. Zhang, X. Zhang, and L. Zhao. 2018. Quora question pairs. Christopher Clark and Matt Gardner. 2018. Simple and effective multi-paragraph reading comprehen- sion. In ACL.Kevin Clark, Minh-Thang Luong, Christopher D Man- ning, and Quoc Le. 2018. Semi-supervised se- quence modeling with cross-view training. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods in Natural Language Processing , pages 1914\u2013 1925. Ronan Collobert and Jason Weston. 2008. A uni\ufb01ed architecture for natural language processing: Deep neural networks with multitask learning. In Pro- ceedings of the 25th international conference on Machine learning , pages 160\u2013167. ACM. Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo \u00a8\u0131c Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing , pages 670\u2013680, Copen- hagen, Denmark. Association for Computational Linguistics. Andrew M Dai and Quoc V Le. 2015. Semi-supervised sequence learning. In Advances in neural informa- tion processing systems , pages 3079\u20133087. J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei- Fei. 2009. ImageNet: A Large-Scale Hierarchical Image Database. In CVPR09 . William B Dolan and Chris Brockett. 2005. Automati- cally constructing a corpus of sentential paraphrases. InProceedings of the Third International Workshop on Paraphrasing (IWP2005) . William Fedus, Ian Goodfellow, and Andrew M Dai. 2018. Maskgan: Better text generation via \ufb01lling in the.arXiv preprint arXiv:1801.07736 . Dan Hendrycks and Kevin Gimpel. 2016. Bridging nonlinearities and stochastic regularizers with gaus- sian error linear units. CoRR , abs\/1606.08415. Felix Hill, Kyunghyun Cho, and Anna Korhonen. 2016. Learning distributed representations of sentences from unlabelled data. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies . Association for Computa- tional Linguistics. Jeremy Howard and Sebastian Ruder. 2018. Universal language model \ufb01ne-tuning for text classi\ufb01cation. In ACL. Association for Computational Linguistics. Minghao Hu, Yuxing Peng, Zhen Huang, Xipeng Qiu, Furu Wei, and Ming Zhou. 2018. Reinforced mnemonic reader for machine reading comprehen- sion. In IJCAI . Yacine Jernite, Samuel R. Bowman, and David Son- tag. 2017. Discourse-based objectives for fast un- supervised sentence representation learning. CoRR , abs\/1705.00557.Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. 2017. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehen- sion. In ACL. Ryan Kiros, Yukun Zhu, Ruslan R Salakhutdinov, Richard Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-thought vectors. In Advances in neural information processing systems , pages 3294\u20133302. Quoc Le and Tomas Mikolov. 2014. Distributed rep- resentations of sentences and documents. 
In Inter- national Conference on Machine Learning , pages 1188\u20131196. Hector J Levesque, Ernest Davis, and Leora Morgen- stern. 2011. The winograd schema challenge. In Aaai spring symposium: Logical formalizations of commonsense reasoning , volume 46, page 47. Lajanugen Logeswaran and Honglak Lee. 2018. An ef\ufb01cient framework for learning sentence represen- tations. In International Conference on Learning Representations . Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. 2017. Learned in translation: Con-","tool for measuring readability. Journalism Bulletin , 30(4):415\u2013433. Erik F Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: Language-independent named entity recognition. In CoNLL . Joseph Turian, Lev Ratinov, and Yoshua Bengio. 2010. Word representations: A simple and general method for semi-supervised learning. In Proceedings of the 48th Annual Meeting of the Association for Compu- tational Linguistics , ACL \u201910, pages 384\u2013394. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems , pages 6000\u20136010. Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre-Antoine Manzagol. 2008. Extracting and composing robust features with denoising autoen- coders. In Proceedings of the 25th international conference on Machine learning , pages 1096\u20131103. ACM. Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018a. Glue: A multi-task benchmark and analysis platformfor natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: An- alyzing and Interpreting Neural Networks for NLP , pages 353\u2013355. Wei Wang, Ming Yan, and Chen Wu. 2018b. Multi- granularity hierarchical attention fusion networks for reading comprehension and question answering. InProceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers) . Association for Computational Lin- guistics. Alex Warstadt, Amanpreet Singh, and Samuel R Bow- man. 2018. Neural network acceptability judg- ments. arXiv preprint arXiv:1805.12471 . Adina Williams, Nikita Nangia, and Samuel R Bow- man. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In NAACL . Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google\u2019s neural ma- chine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144 . Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. 2014. How transferable are features in deep neural networks? In Advances in neural information processing systems , pages 3320\u20133328. Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui Zhao, Kai Chen, Mohammad Norouzi, and Quoc V Le. 2018. QANet: Combining local convolution with global self-attention for reading comprehen- sion. In ICLR . Rowan Zellers, Yonatan Bisk, Roy Schwartz, and Yejin Choi. 2018. Swag: A large-scale adversarial dataset for grounded commonsense inference. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP) . Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhut- dinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. 
Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision , pages 19\u201327. Appendix for \u201cBERT: Pre-training of Deep Bidirectional Transformers for Language Understanding\u201d We organize the appendix into three sections: \u2022 Additional implementation details for BERT are presented in Appendix A;\u2022 Additional details for our experiments are presented in Appendix B; and \u2022 Additional ablation studies are presented in Appendix C. We present additional ablation studies for BERT including: \u2013Effect of Number of Training Steps; and \u2013Ablation for Different"]} {"question": "What is the main difference between BERT and previous language representation models?", "variant_id": "v1", "line_number": 2, "answer":"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers, allowing it to incorporate context from both directions. This is unlike previous language representation models that are unidirectional, which limits the choice of architectures that can be used during pre-training and could be sub-optimal for sentence-level tasks and token-level tasks such as question answering.","context":["BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding Jacob Devlin Ming-Wei Chang Kenton Lee Kristina Toutanova Google AI Language fjacobdevlin,mingweichang,kentonl,kristout [email protected] Abstract We introduce a new language representa- tion model called BERT , which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language repre- sentation models (Peters et al., 2018a; Rad- ford et al., 2018), BERT is designed to pre- train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a re- sult, the pre-trained BERT model can be \ufb01ne- tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task- speci\ufb01c architecture modi\ufb01cations. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art re- sults on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answer- ing Test F1 to 93.2 (1.5 point absolute im- provement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement). 1 Introduction Language model pre-training has been shown to be effective for improving many natural language processing tasks (Dai and Le, 2015; Peters et al., 2018a; Radford et al., 2018; Howard and Ruder, 2018). These include sentence-level tasks such as natural language inference (Bowman et al., 2015; Williams et al., 2018) and paraphrasing (Dolan and Brockett, 2005), which aim to predict the re- lationships between sentences by analyzing them holistically, as well as token-level tasks such as named entity recognition and question answering, where models are required to produce \ufb01ne-grained output at the token level (Tjong Kim Sang and De Meulder, 2003; Rajpurkar et al., 2016).There are two existing strategies for apply- ing pre-trained language representations to down- stream tasks: feature-based and\ufb01ne-tuning . 
The feature-based approach, such as ELMo (Peters et al., 2018a), uses task-speci\ufb01c architectures that include the pre-trained representations as addi- tional features. The \ufb01ne-tuning approach, such as the Generative Pre-trained Transformer (OpenAI GPT) (Radford et al., 2018), introduces minimal task-speci\ufb01c parameters, and is trained on the downstream tasks by simply \ufb01ne-tuning allpre- trained parameters. The two approaches share the same objective function during pre-training, where they use unidirectional language models to learn general language representations. We argue that current techniques restrict the power of the pre-trained representations, espe- cially for the \ufb01ne-tuning approaches. The ma- jor limitation is that standard language models are unidirectional, and this limits the choice of archi- tectures that can be used during pre-training. For example, in OpenAI GPT, the authors use a left-to- right architecture, where every token can only at- tend to previous tokens in the self-attention layers of the Transformer (Vaswani et al., 2017). Such re- strictions are sub-optimal for sentence-level tasks, and could be very harmful when applying \ufb01ne- tuning based approaches to token-level tasks such as question answering, where it is crucial to incor- porate context from both directions. In this paper, we improve the \ufb01ne-tuning based approaches by proposing BERT: Bidirectional Encoder Representations from Transformers.","the self-attention layers of the Transformer (Vaswani et al., 2017). Such re- strictions are sub-optimal for sentence-level tasks, and could be very harmful when applying \ufb01ne- tuning based approaches to token-level tasks such as question answering, where it is crucial to incor- porate context from both directions. In this paper, we improve the \ufb01ne-tuning based approaches by proposing BERT: Bidirectional Encoder Representations from Transformers. BERT alleviates the previously mentioned unidi- rectionality constraint by using a \u201cmasked lan- guage model\u201d (MLM) pre-training objective, in- spired by the Cloze task (Taylor, 1953). The masked language model randomly masks some of the tokens from the input, and the objective is to predict the original vocabulary id of the maskedarXiv:1810.04805v2 [cs.CL] 24 May 2019word based only on its context. Unlike left-to- right language model pre-training, the MLM ob- jective enables the representation to fuse the left and the right context, which allows us to pre- train a deep bidirectional Transformer. In addi- tion to the masked language model, we also use a \u201cnext sentence prediction\u201d task that jointly pre- trains text-pair representations. The contributions of our paper are as follows: \u2022 We demonstrate the importance of bidirectional pre-training for language representations. Un- like Radford et al. (2018), which uses unidirec- tional language models for pre-training, BERT uses masked language models to enable pre- trained deep bidirectional representations. This is also in contrast to Peters et al. (2018a), which uses a shallow concatenation of independently trained left-to-right and right-to-left LMs. \u2022 We show that pre-trained representations reduce the need for many heavily-engineered task- speci\ufb01c architectures. BERT is the \ufb01rst \ufb01ne- tuning based representation model that achieves state-of-the-art performance on a large suite of sentence-level andtoken-level tasks, outper- forming many task-speci\ufb01c architectures. 
\u2022 BERT advances the state of the art for eleven NLP tasks. The code and pre-trained mod- els are available at https:\/\/github.com\/ google-research\/bert . 2 Related Work There is a long history of pre-training general lan- guage representations, and we brie\ufb02y review the most widely-used approaches in this section. 2.1 Unsupervised Feature-based Approaches Learning widely applicable representations of words has been an active area of research for decades, including non-neural (Brown et al., 1992; Ando and Zhang, 2005; Blitzer et al., 2006) and neural (Mikolov et al., 2013; Pennington et al., 2014) methods. Pre-trained word embeddings are an integral part of modern NLP systems, of- fering signi\ufb01cant improvements over embeddings learned from scratch (Turian et al., 2010). To pre- train word embedding vectors, left-to-right lan- guage modeling objectives have been used (Mnih and Hinton, 2009), as well as objectives to dis- criminate correct from incorrect words in left and right context (Mikolov et al., 2013).These approaches have been generalized to coarser granularities, such as sentence embed- dings (Kiros et al., 2015; Logeswaran and Lee, 2018) or paragraph embeddings (Le and Mikolov, 2014). To train sentence representations, prior work has used objectives to rank candidate next sentences (Jernite et al., 2017; Logeswaran and Lee, 2018), left-to-right generation of next sen- tence words given a representation of the previous sentence (Kiros et al., 2015), or denoising auto- encoder","the token-level classi\ufb01er over the NER label set. To ablate the \ufb01ne-tuning approach, we apply the feature-based approach by extracting the activa- tions from one or more layers without \ufb01ne-tuning any parameters of BERT. These contextual em- beddings are used as input to a randomly initial- ized two-layer 768-dimensional BiLSTM before the classi\ufb01cation layer. Results are presented in Table 7. BERT LARGE performs competitively with state-of-the-art meth- ods. The best performing method concatenates the token representations from the top four hidden lay- ers of the pre-trained Transformer, which is only 0.3 F1 behind \ufb01ne-tuning the entire model. This demonstrates that BERT is effective for both \ufb01ne- tuning and feature-based approaches. 6 Conclusion Recent empirical improvements due to transfer learning with language models have demonstrated that rich, unsupervised pre-training is an integral part of many language understanding systems. In particular, these results enable even low-resource tasks to bene\ufb01t from deep unidirectional architec- tures. Our major contribution is further general- izing these \ufb01ndings to deep bidirectional architec- tures, allowing the same pre-trained model to suc- cessfully tackle a broad set of NLP tasks.References Alan Akbik, Duncan Blythe, and Roland V ollgraf. 2018. Contextual string embeddings for sequence labeling. In Proceedings of the 27th International Conference on Computational Linguistics , pages 1638\u20131649. Rami Al-Rfou, Dokook Choe, Noah Constant, Mandy Guo, and Llion Jones. 2018. Character-level lan- guage modeling with deeper self-attention. arXiv preprint arXiv:1808.04444 . Rie Kubota Ando and Tong Zhang. 2005. A framework for learning predictive structures from multiple tasks and unlabeled data. Journal of Machine Learning Research , 6(Nov):1817\u20131853. Luisa Bentivogli, Bernardo Magnini, Ido Dagan, Hoa Trang Dang, and Danilo Giampiccolo. 2009. The \ufb01fth PASCAL recognizing textual entailment challenge. 
In TAC. NIST. John Blitzer, Ryan McDonald, and Fernando Pereira. 2006. Domain adaptation with structural correspon- dence learning. In Proceedings of the 2006 confer- ence on empirical methods in natural language pro- cessing , pages 120\u2013128. Association for Computa- tional Linguistics. Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large anno- tated corpus for learning natural language inference. InEMNLP . Association for Computational Linguis- tics. Peter F Brown, Peter V Desouza, Robert L Mercer, Vincent J Della Pietra, and Jenifer C Lai. 1992. Class-based n-gram models of natural language. Computational linguistics , 18(4):467\u2013479. Daniel Cer, Mona Diab, Eneko Agirre, Inigo Lopez- Gazpio, and Lucia Specia. 2017. Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017) , pages 1\u201314, Vancou- ver, Canada. Association for Computational Lin- guistics. Ciprian Chelba, Tomas Mikolov, Mike Schuster, Qi Ge, Thorsten Brants, Phillipp Koehn, and Tony Robin- son. 2013. One billion word benchmark for measur- ing progress in statistical language modeling. arXiv preprint arXiv:1312.3005 . Z. Chen, H. Zhang, X. Zhang, and L. Zhao. 2018. Quora question pairs. Christopher Clark and Matt Gardner. 2018. Simple and effective multi-paragraph reading comprehen- sion. In ACL.Kevin Clark, Minh-Thang Luong, Christopher D Man- ning, and Quoc Le. 2018. Semi-supervised se- quence modeling with cross-view training. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods"]}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/aggregate.py
from typing import List

from promptflow import log_metric, tool


@tool
def aggregate(groundedness_scores: List[float]):
    """
    This tool aggregates the line-level groundedness scores to the variant level
    and logs the metric for each variant.

    :param groundedness_scores: List of groundedness scores, one per processed line.
    """
    aggregated_results = {"groundedness": 0.0, "count": 0}

    # Calculate average groundedness score for each variant
    for i in range(len(groundedness_scores)):
        aggregated_results["groundedness"] += groundedness_scores[i]
        aggregated_results["count"] += 1

    aggregated_results["groundedness"] /= aggregated_results["count"]

    # Log metric for each variant
    log_metric(key="groundedness", value=aggregated_results["groundedness"])

    return aggregated_results
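A short worked example for aggregate.py, for illustration only. It assumes promptflow is installed and the snippet runs from the eval-groundedness folder; outside a flow run, `log_metric` may only warn. The average of the invented scores is (5 + 4 + 3) / 3 = 4.0.

```python
# Hypothetical usage sketch: average three line-level groundedness scores.
from aggregate import aggregate

print(aggregate([5.0, 4.0, 3.0]))  # expected: {'groundedness': 4.0, 'count': 3}
```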
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/calc_groundedness.py
from promptflow import tool
import re


@tool
def parse_score(gpt_score: str):
    return float(extract_float(gpt_score))


def extract_float(s):
    match = re.search(r"[-+]?\d*\.\d+|\d+", s)
    if match:
        return float(match.group())
    else:
        return None
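A brief usage sketch for calc_groundedness.py, added for illustration only. It assumes the snippet runs from this flow folder with promptflow installed; the sample strings are invented. Note that `parse_score` raises if the input contains no number, since `float(None)` is invalid, so inputs are expected to carry a score.

```python
# Hypothetical usage sketch: pull the numeric score out of a raw LLM completion.
from calc_groundedness import parse_score

print(parse_score("5"))           # expected: 5.0
print(parse_score("Score: 4.5"))  # expected: 4.5
```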
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/README.md
# Groundedness Evaluation

This is a flow that leverages an LLM to evaluate groundedness: whether the answer only states facts that are present in the given context.

Tools used in this flow:
- `python` tool
- built-in `llm` tool

### 0. Setup connection

Prepare your Azure Open AI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.

```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>
```

### 1. Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
```

### 2. Create flow run with multi-line data

```bash
pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' answer='${data.answer}' context='${data.context}' --stream
```

You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI.
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json environment: python_requirements_txt: requirements.txt inputs: question: type: string default: What is the name of the new language representation model introduced in the document? answer: type: string default: The document mentions multiple language representation models, so it is unclear which one is being referred to as \"new\". Can you provide more specific information or context? context: type: string default: '["statistical language modeling. arXiv preprint arXiv:1312.3005 . Z. Chen, H. Zhang, X. Zhang, and L. Zhao. 2018. Quora question pairs. Christopher Clark and Matt Gardner. 2018. Simple and effective multi-paragraph reading comprehen- sion. In ACL.Kevin Clark, Minh-Thang Luong, Christopher D Man- ning, and Quoc Le. 2018. Semi-supervised se- quence modeling with cross-view training. In Pro- ceedings of the 2018 Conference on Empirical Meth- ods in Natural Language Processing , pages 1914\u2013 1925. Ronan Collobert and Jason Weston. 2008. A uni\ufb01ed architecture for natural language processing: Deep neural networks with multitask learning. In Pro- ceedings of the 25th international conference on Machine learning , pages 160\u2013167. ACM. Alexis Conneau, Douwe Kiela, Holger Schwenk, Lo \u00a8\u0131c Barrault, and Antoine Bordes. 2017. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing , pages 670\u2013680, Copen- hagen, Denmark. Association for Computational Linguistics. Andrew M Dai and Quoc V Le. 2015. Semi-supervised sequence learning. In Advances in neural informa- tion processing systems , pages 3079\u20133087. J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei- Fei. 2009. ImageNet: A Large-Scale Hierarchical Image Database. In CVPR09 . William B Dolan and Chris Brockett. 2005. Automati- cally constructing a corpus of sentential paraphrases. InProceedings of the Third International Workshop on Paraphrasing (IWP2005) . William Fedus, Ian Goodfellow, and Andrew M Dai. 2018. Maskgan: Better text generation via \ufb01lling in the.arXiv preprint arXiv:1801.07736 . Dan Hendrycks and Kevin Gimpel. 2016. Bridging nonlinearities and stochastic regularizers with gaussian error linear units. CoRR , abs\/1606.08415. Felix Hill, Kyunghyun Cho, and Anna Korhonen. 2016. Learning distributed representations of sentences from unlabelled data. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies . Association for Computa- tional Linguistics. Jeremy Howard and Sebastian Ruder. 2018. Universal language model \ufb01ne-tuning for text classi\ufb01cation. In ACL. Association for Computational Linguistics. Minghao Hu, Yuxing Peng, Zhen Huang, Xipeng Qiu, Furu Wei, and Ming Zhou. 2018. Reinforced mnemonic reader for machine reading comprehen- sion. In IJCAI . Yacine Jernite, Samuel R. Bowman, and David Son- tag. 2017. Discourse-based objectives for fast un- supervised sentence representation learning. CoRR , abs\/1705.00557.Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. 2017. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehen- sion. In ACL. Ryan Kiros, Yukun Zhu, Ruslan R Salakhutdinov, Richard Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-thought vectors. 
In Advances in neural information processing systems , pages 3294\u20133302. Quoc Le and Tomas Mikolov. 2014. Distributed rep- resentations of sentences and documents. In Inter- national Conference on Machine Learning , pages 1188\u20131196. Hector J Levesque, Ernest Davis, and Leora Morgen- stern. 2011. The winograd schema challenge. In Aaai spring symposium: Logical formalizations of commonsense reasoning , volume 46, page 47. Lajanugen Logeswaran and Honglak Lee. 2018. An ef\ufb01cient framework for learning sentence represen- tations. In International Conference on Learning Representations . Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. 2017. Learned in translation: Con-","tool for measuring readability. Journalism Bulletin , 30(4):415\u2013433. Erik F Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task: Language-independent named entity recognition. In CoNLL . Joseph Turian, Lev Ratinov, and Yoshua Bengio. 2010. Word representations: A simple and general method for semi-supervised learning. In Proceedings of the 48th Annual Meeting of the Association for Compu- tational Linguistics , ACL \u201910, pages 384\u2013394. Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems , pages 6000\u20136010. Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre-Antoine Manzagol. 2008. Extracting and composing robust features with denoising autoen- coders. In Proceedings of the 25th international conference on Machine learning , pages 1096\u20131103. ACM. Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018a. Glue: A multi-task benchmark and analysis platformfor natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: An- alyzing and Interpreting Neural Networks for NLP , pages 353\u2013355. Wei Wang, Ming Yan, and Chen Wu. 2018b. Multi- granularity hierarchical attention fusion networks for reading comprehension and question answering. InProceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers) . Association for Computational Lin- guistics. Alex Warstadt, Amanpreet Singh, and Samuel R Bow- man. 2018. Neural network acceptability judg- ments. arXiv preprint arXiv:1805.12471 . Adina Williams, Nikita Nangia, and Samuel R Bow- man. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In NAACL . Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google\u2019s neural ma- chine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144 . Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. 2014. How transferable are features in deep neural networks? In Advances in neural information processing systems , pages 3320\u20133328. Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui Zhao, Kai Chen, Mohammad Norouzi, and Quoc V Le. 2018. QANet: Combining local convolution with global self-attention for reading comprehen- sion. In ICLR . Rowan Zellers, Yonatan Bisk, Roy Schwartz, and Yejin Choi. 2018. Swag: A large-scale adversarial dataset for grounded commonsense inference. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP) . 
Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhut- dinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision , pages 19\u201327. Appendix for \u201cBERT: Pre-training of Deep Bidirectional Transformers for Language Understanding\u201d We organize the appendix into three sections: \u2022 Additional implementation details for BERT are presented in Appendix A;\u2022 Additional details for our experiments are presented in Appendix B; and \u2022 Additional ablation studies are presented in Appendix C. We present additional ablation studies for BERT including: \u2013Effect of Number of Training Steps; and \u2013Ablation for Different"]} {"question": "What is the main difference between BERT and previous language representation models?", "variant_id": "v1", "line_number": 2, answer":"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers, allowing it to incorporate context from both directions. This is unlike previous language representation models that are unidirectional, which limits the choice of architectures that can be used during pre-training and could be sub-optimal for sentence-level tasks and token-level tasks such as question answering.","context":["BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding Jacob Devlin Ming-Wei Chang Kenton Lee Kristina Toutanova Google AI Language fjacobdevlin,mingweichang,kentonl,kristout [email protected] Abstract We introduce a new language representa- tion model called BERT , which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language repre- sentation models (Peters et al., 2018a; Rad- ford et al., 2018), BERT is designed to pre- train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a re- sult, the pre-trained BERT model can be \ufb01ne- tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task- speci\ufb01c architecture modi\ufb01cations. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art re- sults on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answer- ing Test F1 to 93.2 (1.5 point absolute im- provement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement). 1 Introduction Language model pre-training has been shown to be effective for improving many natural language processing tasks (Dai and Le, 2015; Peters et al., 2018a; Radford et al., 2018; Howard and Ruder, 2018). 
These include sentence-level tasks such as natural language inference (Bowman et al., 2015; Williams et al., 2018) and paraphrasing (Dolan and Brockett, 2005), which aim to predict the re- lationships between sentences by analyzing them holistically, as well as token-level tasks such as named entity recognition and question answering, where models are required to produce \ufb01ne-grained output at the token level (Tjong Kim Sang and De Meulder, 2003; Rajpurkar et al., 2016).There are two existing strategies for apply- ing pre-trained language representations to down- stream tasks: feature-based and\ufb01ne-tuning . The feature-based approach, such as ELMo (Peters et al., 2018a), uses task-speci\ufb01c architectures that include the pre-trained representations as addi- tional features. The \ufb01ne-tuning approach, such as the Generative Pre-trained Transformer (OpenAI GPT) (Radford et al., 2018), introduces minimal task-speci\ufb01c parameters, and is trained on the downstream tasks by simply \ufb01ne-tuning allpre- trained parameters. The two approaches share the same objective function during pre-training, where they use unidirectional language models to learn general language representations. We argue that current techniques restrict the power of the pre-trained representations, espe- cially for the \ufb01ne-tuning approaches. The ma- jor limitation is that standard language models are unidirectional, and this limits the choice of archi- tectures that can be used during pre-training. For example, in OpenAI GPT, the authors use a left-to- right architecture, where every token can only at- tend to previous tokens in the self-attention layers of the Transformer (Vaswani et al., 2017). Such re- strictions are sub-optimal for sentence-level tasks, and could be very harmful when applying \ufb01ne- tuning based approaches to token-level tasks such as question answering, where it is crucial to incor- porate context from both directions. In this paper, we improve the \ufb01ne-tuning based approaches by proposing BERT: Bidirectional Encoder Representations from Transformers.","the self-attention layers of the Transformer (Vaswani et al., 2017). Such re- strictions are sub-optimal for sentence-level tasks, and could be very harmful when applying \ufb01ne- tuning based approaches to token-level tasks such as question answering, where it is crucial to incor- porate context from both directions. In this paper, we improve the \ufb01ne-tuning based approaches by proposing BERT: Bidirectional Encoder Representations from Transformers. BERT alleviates the previously mentioned unidi- rectionality constraint by using a \u201cmasked lan- guage model\u201d (MLM) pre-training objective, in- spired by the Cloze task (Taylor, 1953). The masked language model randomly masks some of the tokens from the input, and the objective is to predict the original vocabulary id of the maskedarXiv:1810.04805v2 [cs.CL] 24 May 2019word based only on its context. Unlike left-to- right language model pre-training, the MLM ob- jective enables the representation to fuse the left and the right context, which allows us to pre- train a deep bidirectional Transformer. In addi- tion to the masked language model, we also use a \u201cnext sentence prediction\u201d task that jointly pre- trains text-pair representations. The contributions of our paper are as follows: \u2022 We demonstrate the importance of bidirectional pre-training for language representations. Un- like Radford et al. 
(2018), which uses unidirec- tional language models for pre-training, BERT uses masked language models to enable pre- trained deep bidirectional representations. This is also in contrast to Peters et al. (2018a), which uses a shallow concatenation of independently trained left-to-right and right-to-left LMs. \u2022 We show that pre-trained representations reduce the need for many heavily-engineered task- speci\ufb01c architectures. BERT is the \ufb01rst \ufb01ne- tuning based representation model that achieves state-of-the-art performance on a large suite of sentence-level andtoken-level tasks, outper- forming many task-speci\ufb01c architectures. \u2022 BERT advances the state of the art for eleven NLP tasks. The code and pre-trained mod- els are available at https:\/\/github.com\/ google-research\/bert . 2 Related Work There is a long history of pre-training general lan- guage representations, and we brie\ufb02y review the most widely-used approaches in this section. 2.1 Unsupervised Feature-based Approaches Learning widely applicable representations of words has been an active area of research for decades, including non-neural (Brown et al., 1992; Ando and Zhang, 2005; Blitzer et al., 2006) and neural (Mikolov et al., 2013; Pennington et al., 2014) methods. Pre-trained word embeddings are an integral part of modern NLP systems, of- fering signi\ufb01cant improvements over embeddings learned from scratch (Turian et al., 2010). To pre- train word embedding vectors, left-to-right lan- guage modeling objectives have been used (Mnih and Hinton, 2009), as well as objectives to dis- criminate correct from incorrect words in left and right context (Mikolov et al., 2013).These approaches have been generalized to coarser granularities, such as sentence embed- dings (Kiros et al., 2015; Logeswaran and Lee, 2018) or paragraph embeddings (Le and Mikolov, 2014). "]' outputs: groundedness: type: string reference: ${parse_score.output} nodes: - name: parse_score type: python source: type: code path: calc_groundedness.py inputs: gpt_score: ${gpt_groundedness.output} - name: aggregate type: python source: type: code path: aggregate.py inputs: groundedness_scores: ${parse_score.output} aggregation: true - name: gpt_groundedness type: llm source: type: code path: gpt_groundedness.md inputs: # This is to easily switch between openai and azure openai. # deployment_name is required by azure openai, model is required by openai. deployment_name: gpt-4 model: gpt-4 max_tokens: 5 answer: ${inputs.answer} question: ${inputs.question} context: ${inputs.context} temperature: 0 connection: open_ai_connection api: chat
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-groundedness/gpt_groundedness.md
user:
# Instructions
* There are many chatbots that can answer users' questions based on the context given from different sources like search results, or snippets from books/papers. They try to understand the user's question and then get context by performing search against search engines, databases or books/papers for relevant content. Later they answer questions based on their understanding of the question and the context.
* Your goal is to score the question, answer and context from 1 to 10 based on the rules below:
* Score 10 if the answer is stating facts that are all present in the given context
* Score 1 if the answer is stating things that none of which are present in the given context
* If there are multiple facts in the answer and some of them are present in the given context while some are not, score between 1 and 10 based on the fraction of information supported by the context
* Just respond with the score, nothing else.

# Real work

## Question
{{question}}

## Answer
{{answer}}

## Context
{{context}}

## Score
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/test_data.jsonl
{"question": "What is the sum of 5 and 3?", "groundtruth": "8", "answer": "8"} {"question": "Subtract 7 from 10.", "groundtruth": "3", "answer": "3"} {"question": "Multiply 6 by 4.", "groundtruth": "24", "answer": "24"} {"question": "Divide 20 by 5.", "groundtruth": "4", "answer": "4"} {"question": "What is the square of 7?", "groundtruth": "49", "answer": "49"} {"question": "What is the square root of 81?", "groundtruth": "9", "answer": "9"} {"question": "If a rectangle has a length of 10 and width of 5, what is the area?", "groundtruth": "50", "answer": "50"} {"question": "A circle has a radius of 7, what is the area? (Use 3.14 for pi)", "groundtruth": "153.86", "answer": "153.871"} {"question": "Solve for x in the equation 2x + 3 = 9.", "groundtruth": "3", "answer": "3"} {"question": "What is the value of x if 5x = 25?", "groundtruth": "5", "answer": "5"} {"question": "A car travels 200 miles in 4 hours. What is the average speed of the car?", "groundtruth": "50", "answer": "50"} {"question": "A car travels at a speed of 60 mph. How long will it take to travel 180 miles?", "groundtruth": "3", "answer": "3"} {"question": "If a car travels at a speed of 40 mph for 2 hours, how far will it travel?","groundtruth": "80", "answer": "80"} {"question":"A rectangle has length = 10 cm and width = 5 cm. What is its area?", "groundtruth":"50", "answer": "50"} {"question":"A circle has radius = 7 cm. What is its circumference? (Use pi =3.14)", "groundtruth":"43.96", "answer": "43.959"} {"question":"A triangle has base =10 cm and height =5 cm. What is its area?", "groundtruth":"25", "answer": "25"} {"question":"What is the slope of the line that passes through (2,3) and (4,7)?", "groundtruth":"2", "answer": "2"} {"question":"The distance between A and B is 2000km, A is moving towards B with speed 80km/hour, meanwhile B is moving towards A with speed 120km/hour, how many hours later A and B can meet?", "groundtruth":"10", "answer": "10"} {"question":"The lengths of the two perpendicular sides of a right triangle are 6cm and 8cm. What is the length of the hypotenuse?", "groundtruth": "10", "answer": "10"} {"question":"A is running with average speed 10km/hour, A already run half hour. B start to chase A along the same route with average speed 15km/hour, how many hours B will take to meet A?", "groundtruth":"1", "answer": "2"}
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/line_process.py
from promptflow import tool


@tool
def line_process(groundtruth: str, prediction: str) -> int:
    processed_result = 0
    if prediction == "JSONDecodeError" or prediction.startswith("Unknown Error:"):
        processed_result = -1
        return processed_result
    try:
        groundtruth = float(groundtruth)
        prediction = float(prediction)
    except ValueError:
        processed_result = -1
        return processed_result
    if round(prediction, 2) == round(groundtruth, 2):
        processed_result = 1
    return processed_result


if __name__ == "__main__":
    processed_result = line_process("1.0", "1")
    print("The processed result is", processed_result)

    processed_result = line_process("3.14", "3.1415926")
    print("The processed result is", processed_result)

    processed_result = line_process("2.1", "2.0")
    print("The processed result is", processed_result)

    processed_result = line_process("1.0", "JSONDecodeError")
    print("The processed result is", processed_result)

    processed_result = line_process("1.0", "No module named 'numpy'")
    print("The processed result is", processed_result)
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/aggregate.py
from typing import List

from promptflow import tool
from promptflow import log_metric


@tool
def accuracy_aggregate(processed_results: List[int]):
    num_exception = 0
    num_correct = 0

    for i in range(len(processed_results)):
        if processed_results[i] == -1:
            num_exception += 1
        elif processed_results[i] == 1:
            num_correct += 1

    num_total = len(processed_results)
    accuracy = round(1.0 * num_correct / num_total, 2)
    error_rate = round(1.0 * num_exception / num_total, 2)

    log_metric(key="accuracy", value=accuracy)
    log_metric(key="error_rate", value=error_rate)

    return {
        "num_total": num_total,
        "num_correct": num_correct,
        "num_exception": num_exception,
        "accuracy": accuracy,
        "error_rate": error_rate
    }


if __name__ == "__main__":
    numbers = [1, 1, 1, 1, 0, -1, -1]
    accuracy = accuracy_aggregate(numbers)
    print("The accuracy is", accuracy)
0
promptflow_repo/promptflow/examples/flows/evaluation
promptflow_repo/promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  groundtruth:
    type: string
    default: "1"
  prediction:
    type: string
    default: "2"
outputs:
  score:
    type: string
    reference: ${line_process.output}
nodes:
- name: line_process
  type: python
  source:
    type: code
    path: line_process.py
  inputs:
    groundtruth: ${inputs.groundtruth}
    prediction: ${inputs.prediction}
- name: aggregate
  type: python
  source:
    type: code
    path: aggregate.py
  inputs:
    processed_results: ${line_process.output}
  aggregation: true
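# Usage sketch (illustrative, not part of the flow definition): this eval flow can be run
# against test_data.jsonl in this folder with the `pf run create` pattern used elsewhere in
# these examples. Mapping `prediction` to the data's "answer" column is an assumption made
# for demonstration purposes:
#
#   pf run create --flow . --data ./test_data.jsonl \
#     --column-mapping groundtruth='${data.groundtruth}' prediction='${data.answer}' --stream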
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/describe-image/flip_image.py
import io

from promptflow import tool
from promptflow.contracts.multimedia import Image
from PIL import Image as PIL_Image


@tool
def passthrough(input_image: Image) -> Image:
    image_stream = io.BytesIO(input_image)
    pil_image = PIL_Image.open(image_stream)
    flipped_image = pil_image.transpose(PIL_Image.FLIP_LEFT_RIGHT)
    buffer = io.BytesIO()
    flipped_image.save(buffer, format="PNG")
    return Image(buffer.getvalue(), mime_type="image/png")
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/describe-image/data.jsonl
{"question": "How many colors are there in the image?", "input_image": {"data:image/png;url": "https://developer.microsoft.com/_devcom/images/logo-ms-social.png"}} {"question": "What's this image about?", "input_image": {"data:image/png;url": "https://developer.microsoft.com/_devcom/images/404.png"}}
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/describe-image/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/describe-image/README.md
# Describe image flow

A flow that takes an image input, flips it horizontally, and uses the OpenAI GPT-4V tool to describe it.

Tools used in this flow:
- `OpenAI GPT-4V` tool
- custom `python` tool

Connections used in this flow:
- OpenAI connection

## Prerequisites

Install promptflow sdk and other dependencies, then create the connection for the OpenAI GPT-4V tool to use:

```bash
pip install -r requirements.txt
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> name=aoai_gpt4v_connection api_version=2023-07-01-preview
```

## Run flow

- Prepare OpenAI connection

Go to the "Prompt flow" "Connections" tab. Click the "Create" button and create an "OpenAI" connection. If you do not have an OpenAI account, please refer to [OpenAI](https://platform.openai.com/) for more details.

- Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .

# test with flow inputs
pf flow test --flow . --inputs question="How many colors can you see?" input_image="https://developer.microsoft.com/_devcom/images/logo-ms-social.png"
```

- Create run with multiple lines data

```bash
pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' --stream
```

You can also skip providing `column-mapping` if the provided data has the same column names as the flow. Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI.

- List and show run meta

```bash
# list created run
pf run list

# get a sample run name
name=$(pf run list -r 10 | jq '.[] | select(.name | contains("describe_image_variant_0")) | .name'| head -n 1 | tr -d '"')

# show specific run detail
pf run show --name $name

# show output
pf run show-details --name $name

# visualize run in browser
pf run visualize --name $name
```
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/describe-image/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  question:
    type: string
    default: Please describe this image.
  input_image:
    type: image
    default: https://developer.microsoft.com/_devcom/images/logo-ms-social.png
outputs:
  answer:
    type: string
    reference: ${question_on_image.output}
  output_image:
    type: string
    reference: ${flip_image.output}
nodes:
- name: flip_image
  type: python
  source:
    type: code
    path: flip_image.py
  inputs:
    input_image: ${inputs.input_image}
- name: question_on_image
  type: custom_llm
  source:
    type: package_with_prompt
    tool: promptflow.tools.aoai_gpt4v.AzureOpenAI.chat
    path: question_on_image.jinja2
  inputs:
    connection: aoai_gpt4v_connection
    deployment_name: gpt-4v
    max_tokens: 512
    question: ${inputs.question}
    test_image: ${flip_image.output}
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/describe-image/question_on_image.jinja2
# system:
As an AI assistant, your task involves interpreting images and responding to questions about the image. Remember to provide accurate answers based on the information present in the image.

# user:
{{question}}
![image]({{test_image}})
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/conditional-flow-for-if-else/generate_result.py
from promptflow import tool


@tool
def generate_result(llm_result="", default_result="") -> str:
    if llm_result:
        return llm_result
    else:
        return default_result
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/conditional-flow-for-if-else/data.jsonl
{"question": "What is Prompt flow?"} {"question": "What is ChatGPT?"}
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/conditional-flow-for-if-else/requirements.txt
promptflow
promptflow-tools
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/conditional-flow-for-if-else/default_result.py
from promptflow import tool


@tool
def default_result(question: str) -> str:
    return f"I'm not familiar with your query: {question}."
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/conditional-flow-for-if-else/README.md
# Conditional flow for if-else scenario

This example is a conditional flow for an if-else scenario.

By following this example, you will learn how to create a conditional flow using the `activate` config (an excerpt is included at the end of this README).

## Flow description

This flow checks whether an input query passes a content safety check. If the check is denied, we return a default response; otherwise, we call the LLM to get a response and then summarize the final results.

The following are the two execution situations of this flow:
- if the input query passes the content safety check:
  ![content_safety_check_passed](content_safety_check_passed.png)
- else:
  ![content_safety_check_failed](content_safety_check_failed.png)

**Notice**: The `content_safety_check` and `llm_result` nodes in this flow are dummy nodes that do not actually use the content safety tool and the LLM tool. You can replace them with the real ones. Learn more: [LLM Tool](https://microsoft.github.io/promptflow/reference/tools-reference/llm-tool.html)

## Prerequisites

Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```

## Run flow

- Test flow
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .

# test with flow inputs
pf flow test --flow . --inputs question="What is Prompt flow?"
```

- Create run with multiple lines of data
```bash
# create a random run name
run_name="conditional_flow_for_if_else_"$(openssl rand -hex 12)

# create run
pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' --stream --name $run_name
```

- List and show run metadata
```bash
# list created run
pf run list

# show specific run detail
pf run show --name $run_name

# show output
pf run show-details --name $run_name

# visualize run in browser
pf run visualize --name $run_name
```
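## Activate config excerpt

The if-else behavior comes from the `activate` setting on the two branch nodes. For reference, the relevant excerpt from this folder's [flow.dag.yaml](flow.dag.yaml) looks like this (see the full file for the complete definition):

```yaml
- name: llm_result
  type: python
  source:
    type: code
    path: llm_result.py
  inputs:
    question: ${inputs.question}
  activate:
    when: ${content_safety_check.output}
    is: true
- name: default_result
  type: python
  source:
    type: code
    path: default_result.py
  inputs:
    question: ${inputs.question}
  activate:
    when: ${content_safety_check.output}
    is: false
```

Only the branch whose `when` condition matches is executed; the other branch is skipped, which is why `generate_result` gives both of its inputs empty-string defaults and simply returns whichever result is non-empty.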
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/conditional-flow-for-if-else/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  question:
    type: string
    default: What is Prompt flow?
outputs:
  answer:
    type: string
    reference: ${generate_result.output}
nodes:
- name: content_safety_check
  type: python
  source:
    type: code
    path: content_safety_check.py
  inputs:
    text: ${inputs.question}
- name: llm_result
  type: python
  source:
    type: code
    path: llm_result.py
  inputs:
    question: ${inputs.question}
  activate:
    when: ${content_safety_check.output}
    is: true
- name: default_result
  type: python
  source:
    type: code
    path: default_result.py
  inputs:
    question: ${inputs.question}
  activate:
    when: ${content_safety_check.output}
    is: false
- name: generate_result
  type: python
  source:
    type: code
    path: generate_result.py
  inputs:
    llm_result: ${llm_result.output}
    default_result: ${default_result.output}
environment:
  python_requirements_txt: requirements.txt
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/conditional-flow-for-if-else/llm_result.py
from promptflow import tool


@tool
def llm_result(question: str) -> str:
    # You can use an LLM node to replace this tool.
    return (
        "Prompt flow is a suite of development tools designed to streamline "
        "the end-to-end development cycle of LLM-based AI applications."
    )
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/conditional-flow-for-if-else/content_safety_check.py
import random

from promptflow import tool


@tool
def content_safety_check(text: str) -> bool:
    # You can use a content safety node to replace this tool.
    return random.choice([True, False])
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/basic/hello.py
import os

from openai.version import VERSION as OPENAI_VERSION
from dotenv import load_dotenv
from promptflow import tool

# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need


def to_bool(value) -> bool:
    return str(value).lower() == "true"


def get_client():
    if OPENAI_VERSION.startswith("0."):
        raise Exception(
            "Please upgrade your OpenAI package to version >= 1.0.0 or use the command: pip install --upgrade openai."
        )
    api_key = os.environ["AZURE_OPENAI_API_KEY"]
    conn = dict(
        api_key=os.environ["AZURE_OPENAI_API_KEY"],
    )
    if api_key.startswith("sk-"):
        from openai import OpenAI as Client
    else:
        from openai import AzureOpenAI as Client
        conn.update(
            azure_endpoint=os.environ["AZURE_OPENAI_API_BASE"],
            api_version=os.environ.get("AZURE_OPENAI_API_VERSION", "2023-07-01-preview"),
        )
    return Client(**conn)


@tool
def my_python_tool(
    prompt: str,
    # for AOAI, deployment name is customized by user, not model name.
    deployment_name: str,
    suffix: str = None,
    max_tokens: int = 120,
    temperature: float = 1.0,
    top_p: float = 1.0,
    n: int = 1,
    logprobs: int = None,
    echo: bool = False,
    stop: list = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    best_of: int = 1,
    logit_bias: dict = {},
    user: str = "",
    **kwargs,
) -> str:
    if "AZURE_OPENAI_API_KEY" not in os.environ:
        # load environment variables from .env file
        load_dotenv()

    if "AZURE_OPENAI_API_KEY" not in os.environ:
        raise Exception("Please specify environment variables: AZURE_OPENAI_API_KEY")

    # TODO: remove below type conversion after client can pass json rather than string.
    echo = to_bool(echo)

    response = get_client().completions.create(
        prompt=prompt,
        model=deployment_name,
        # empty string suffix should be treated as None.
        suffix=suffix if suffix else None,
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        n=int(n),
        logprobs=int(logprobs) if logprobs else None,
        echo=echo,
        # fix bug "[] is not valid under any of the given schemas-'stop'"
        stop=stop if stop else None,
        presence_penalty=float(presence_penalty),
        frequency_penalty=float(frequency_penalty),
        best_of=int(best_of),
        # Logit bias must be a dict if we passed it to openai api.
        logit_bias=logit_bias if logit_bias else {},
        user=user,
    )

    # get first element because prompt is single.
    return response.choices[0].text
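
# A hypothetical local driver, mirroring the __main__ blocks used by other example tools in these
# flows. It is not part of the flow definition; it assumes AZURE_OPENAI_API_KEY / AZURE_OPENAI_API_BASE
# are set (e.g. via the .env file) and that a "text-davinci-003" deployment exists, matching the
# default in this folder's flow.dag.yaml.
if __name__ == "__main__":
    result = my_python_tool(
        prompt="Write a simple Hello World program that displays the greeting message when executed.",
        deployment_name="text-davinci-003",
        max_tokens=120,
    )
    print(result)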
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/basic/data.jsonl
{"text": "Python Hello World!"} {"text": "C Hello World!"} {"text": "C# Hello World!"}
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/basic/hello.jinja2
{# Please replace the template with your own prompt. #}
Write a simple {{text}} program that displays the greeting message when executed.
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/basic/requirements.txt
promptflow[azure]
promptflow-tools
python-dotenv
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/basic/.env.example
AZURE_OPENAI_API_KEY=<your_AOAI_key>
AZURE_OPENAI_API_BASE=<your_AOAI_endpoint>
AZURE_OPENAI_API_TYPE=azure
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/basic/run.yml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json
flow: .
data: data.jsonl
environment_variables:
  # environment variables from connection
  AZURE_OPENAI_API_KEY: ${open_ai_connection.api_key}
  AZURE_OPENAI_API_BASE: ${open_ai_connection.api_base}
  AZURE_OPENAI_API_TYPE: azure
column_mapping:
  text: ${data.text}
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/basic/README.md
# Basic standard flow

A basic standard flow using a custom python tool that calls Azure OpenAI with connection info stored in environment variables.

Tools used in this flow:
- `prompt` tool
- custom `python` tool

Connections used in this flow:
- None

## Prerequisites

Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```

## Run flow

- Prepare your Azure OpenAI resource by following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.

- Setup environment variables

Ensure you have put your Azure OpenAI endpoint and key in the [.env](.env) file. You can create one by referring to this [example file](.env.example).

```bash
cat .env
```

- Test flow/node

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .

# test with flow inputs
pf flow test --flow . --inputs text="Java Hello World!"

# test node with inputs
pf flow test --flow . --node llm --inputs prompt="Write a simple Hello World program that displays the greeting message when executed."
```

- Create run with multiple lines data

```bash
# using environment from .env file (loaded in user code: hello.py)
pf run create --flow . --data ./data.jsonl --column-mapping text='${data.text}' --stream
```

You can also skip providing `column-mapping` if the provided data has the same column names as the flow. Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI.

- List and show run meta

```bash
# list created run
pf run list

# get a sample run name
name=$(pf run list -r 10 | jq '.[] | select(.name | contains("basic_variant_0")) | .name'| head -n 1 | tr -d '"')

# show specific run detail
pf run show --name $name

# show output
pf run show-details --name $name

# visualize run in browser
pf run visualize --name $name
```

## Run flow with connection

Storing connection info in .env as plaintext is not safe. We recommend using `pf connection` to guard secrets like `api_key` from leaking.

- Show or create `open_ai_connection`

```bash
# create connection from `azure_openai.yml` file
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>

# check if connection exists
pf connection show -n open_ai_connection
```

- Test using connection secret specified in environment variables

**Note**: we used `'` to wrap the value since it supports raw values without escaping in powershell & bash. For windows command prompt, you may remove the `'` to avoid it becoming part of the value.

```bash
# test with default input value in flow.dag.yaml
pf flow test --flow . --environment-variables AZURE_OPENAI_API_KEY='${open_ai_connection.api_key}' AZURE_OPENAI_API_BASE='${open_ai_connection.api_base}'
```

- Create run using connection secret binding specified in environment variables, see [run.yml](run.yml)

```bash
# create run
pf run create --flow . --data ./data.jsonl --stream --environment-variables AZURE_OPENAI_API_KEY='${open_ai_connection.api_key}' AZURE_OPENAI_API_BASE='${open_ai_connection.api_base}' --column-mapping text='${data.text}'

# create run using yaml file
pf run create --file run.yml --stream

# show outputs
name=$(pf run list -r 10 | jq '.[] | select(.name | contains("basic_variant_0")) | .name'| head -n 1 | tr -d '"')
pf run show-details --name $name
```

## Run flow in cloud with connection

- Assume we already have a connection named `open_ai_connection` in the workspace.

```bash
# set default workspace
az account set -s <your_subscription_id>
az configure --defaults group=<your_resource_group_name> workspace=<your_workspace_name>
```

- Create run

```bash
# run with environment variable reference connection in azureml workspace
pfazure run create --flow . --data ./data.jsonl --environment-variables AZURE_OPENAI_API_KEY='${open_ai_connection.api_key}' AZURE_OPENAI_API_BASE='${open_ai_connection.api_base}' --column-mapping text='${data.text}' --stream

# run using yaml file
pfazure run create --file run.yml --stream
```

- List and show run meta

```bash
# list created run
pfazure run list -r 3

# get a sample run name
name=$(pfazure run list -r 100 | jq '.[] | select(.name | contains("basic_variant_0")) | .name'| head -n 1 | tr -d '"')

# show specific run detail
pfazure run show --name $name

# show output
pfazure run show-details --name $name

# visualize run in browser
pfazure run visualize --name $name
```
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/basic/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
  python_requirements_txt: requirements.txt
inputs:
  text:
    type: string
    default: Hello World!
outputs:
  output:
    type: string
    reference: ${llm.output}
nodes:
- name: hello_prompt
  type: prompt
  source:
    type: code
    path: hello.jinja2
  inputs:
    text: ${inputs.text}
- name: llm
  type: python
  source:
    type: code
    path: hello.py
  inputs:
    prompt: ${hello_prompt.output}
    deployment_name: text-davinci-003
    max_tokens: "120"
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/web-classification/classify_with_llm.jinja2
system:
Your task is to classify a given url into one of the following categories: Movie, App, Academic, Channel, Profile, PDF or None based on the text content information. The classification will be based on the url, the webpage text content summary, or both.

user:
The selection range of the value of "category" must be within "Movie", "App", "Academic", "Channel", "Profile", "PDF" and "None".
The selection range of the value of "evidence" must be within "Url", "Text content", and "Both".

Here are a few examples:
{% for ex in examples %}
URL: {{ex.url}}
Text content: {{ex.text_content}}
OUTPUT:
{"category": "{{ex.category}}", "evidence": "{{ex.evidence}}"}
{% endfor %}

For a given URL and text content, classify the url to complete the category and indicate evidence:
URL: {{url}}
Text content: {{text_content}}.
OUTPUT:
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/web-classification/fetch_text_content_from_url.py
import bs4
import requests

from promptflow import tool


@tool
def fetch_text_content_from_url(url: str):
    # Send a request to the URL
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
0
promptflow_repo/promptflow/examples/flows/standard
promptflow_repo/promptflow/examples/flows/standard/web-classification/convert_to_dict.py
import json

from promptflow import tool


@tool
def convert_to_dict(input_str: str):
    try:
        return json.loads(input_str)
    except Exception as e:
        print("The input is not valid, error: {}".format(e))
        return {"category": "None", "evidence": "None"}
0