Muennighoff
committed on
Scheduled Commit
data/clustering_battle-f36c9f97-e795-4522-8eb7-7dee254b42c7.jsonl
CHANGED
@@ -1,3 +1,4 @@
{"tstamp": 1723537022.381, "task_type": "clustering", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "6f8f3ee31b2f4a76b9c393538a2c4cd4", "0_model_name": "text-embedding-004", "0_prompt": ["water filter", "sleeping bag", "tent", "camping stove", "flashlight", "backpack", "nitrogen", "hydrogen", "calcium", "iron", "carbon", "oxygen", "classical", "country", "reggae", "jazz", "electronic", "rock"], "0_ncluster": 3, "0_output": "", "0_ndim": "3D (press for 2D)", "0_dim_method": "PCA", "0_clustering_method": "KMeans", "1_conv_id": "4276e84f4a32486b8041dddb8e036f79", "1_model_name": "embed-english-v3.0", "1_prompt": ["water filter", "sleeping bag", "tent", "camping stove", "flashlight", "backpack", "nitrogen", "hydrogen", "calcium", "iron", "carbon", "oxygen", "classical", "country", "reggae", "jazz", "electronic", "rock"], "1_ncluster": 3, "1_output": "", "1_ndim": "3D (press for 2D)", "1_dim_method": "PCA", "1_clustering_method": "KMeans"}
{"tstamp": 1723537063.3503, "task_type": "clustering", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "14a28c4de8364126b794fbd33b3efb9a", "0_model_name": "intfloat/multilingual-e5-large-instruct", "0_prompt": ["americano", "macchiato", "cold brew", "espresso", "gas", "solid"], "0_ncluster": 2, "0_output": "", "0_ndim": "3D (press for 2D)", "0_dim_method": "PCA", "0_clustering_method": "KMeans", "1_conv_id": "fb7f3d68fe9d408ea66bb106d257f4f0", "1_model_name": "BAAI/bge-large-en-v1.5", "1_prompt": ["americano", "macchiato", "cold brew", "espresso", "gas", "solid"], "1_ncluster": 2, "1_output": "", "1_ndim": "3D (press for 2D)", "1_dim_method": "PCA", "1_clustering_method": "KMeans"}
{"tstamp": 1723537092.4097, "task_type": "clustering", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "5eb4650b78024d97a994ab884abf39a2", "0_model_name": "intfloat/e5-mistral-7b-instruct", "0_prompt": ["baking", "roasting", "boiling", "Spanish", "French", "Arabic", "Russian", "English", "Hindi"], "0_ncluster": 2, "0_output": "", "0_ndim": "3D (press for 2D)", "0_dim_method": "PCA", "0_clustering_method": "KMeans", "1_conv_id": "1d943122db25477f9451a742183f91b9", "1_model_name": "intfloat/multilingual-e5-large-instruct", "1_prompt": ["baking", "roasting", "boiling", "Spanish", "French", "Arabic", "Russian", "English", "Hindi"], "1_ncluster": 2, "1_output": "", "1_ndim": "3D (press for 2D)", "1_dim_method": "PCA", "1_clustering_method": "KMeans"}
{"tstamp": 1723538460.2356, "task_type": "clustering", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "86e5675f25114ddd86d90a07b0d38817", "0_model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "0_prompt": ["metamorphic", "igneous", "sedimentary", "loafers", "sneakers", "loafers", "flats", "sandals", "boots", "high heels", "romance", "documentary", "drama", "thriller", "comedy", "tennis", "swimming", "volleyball", "cricket", "soccer", "basketball", "claustrophobia", "acrophobia", "nyctophobia", "arachnophobia", "agoraphobia", "ophidiophobia", "PlayStation", "Atari", "Xbox", "Nintendo"], "0_ncluster": 5, "0_output": "", "0_ndim": "2D (press for 3D)", "0_dim_method": "PCA", "0_clustering_method": "KMeans", "1_conv_id": "a5e9e96fdc6a47c8b7b2c09f6d2a2efd", "1_model_name": "mixedbread-ai/mxbai-embed-large-v1", "1_prompt": ["metamorphic", "igneous", "sedimentary", "loafers", "sneakers", "loafers", "flats", "sandals", "boots", "high heels", "romance", "documentary", "drama", "thriller", "comedy", "tennis", "swimming", "volleyball", "cricket", "soccer", "basketball", "claustrophobia", "acrophobia", "nyctophobia", "arachnophobia", "agoraphobia", "ophidiophobia", "PlayStation", "Atari", "Xbox", "Nintendo"], "1_ncluster": 5, "1_output": "", "1_ndim": "2D (press for 3D)", "1_dim_method": "PCA", "1_clustering_method": "KMeans"}
data/clustering_individual-f36c9f97-e795-4522-8eb7-7dee254b42c7.jsonl
CHANGED
@@ -4,3 +4,7 @@
{"tstamp": 1723537051.6809, "task_type": "clustering", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1723537051.1751, "finish": 1723537051.6809, "ip": "", "conv_id": "fb7f3d68fe9d408ea66bb106d257f4f0", "model_name": "BAAI/bge-large-en-v1.5", "prompt": ["americano", "macchiato", "cold brew", "espresso", "gas", "solid"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1723537079.8292, "task_type": "clustering", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1723537079.7369, "finish": 1723537079.8292, "ip": "", "conv_id": "5eb4650b78024d97a994ab884abf39a2", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": ["baking", "roasting", "boiling", "Spanish", "French", "Arabic", "Russian", "English", "Hindi"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1723537079.8292, "task_type": "clustering", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1723537079.7369, "finish": 1723537079.8292, "ip": "", "conv_id": "1d943122db25477f9451a742183f91b9", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": ["baking", "roasting", "boiling", "Spanish", "French", "Arabic", "Russian", "English", "Hindi"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1723538391.1962, "task_type": "clustering", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1723538391.1083, "finish": 1723538391.1962, "ip": "", "conv_id": "86e5675f25114ddd86d90a07b0d38817", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": ["metamorphic", "igneous", "sedimentary", "loafers", "sneakers"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1723538391.1962, "task_type": "clustering", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1723538391.1083, "finish": 1723538391.1962, "ip": "", "conv_id": "a5e9e96fdc6a47c8b7b2c09f6d2a2efd", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": ["metamorphic", "igneous", "sedimentary", "loafers", "sneakers"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1723538429.5136, "task_type": "clustering", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1723538428.9832, "finish": 1723538429.5136, "ip": "", "conv_id": "86e5675f25114ddd86d90a07b0d38817", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": ["metamorphic", "igneous", "sedimentary", "loafers", "sneakers", "loafers", "flats", "sandals", "boots", "high heels", "romance", "documentary", "drama", "thriller", "comedy", "tennis", "swimming", "volleyball", "cricket", "soccer", "basketball", "claustrophobia", "acrophobia", "nyctophobia", "arachnophobia", "agoraphobia", "ophidiophobia", "PlayStation", "Atari", "Xbox", "Nintendo"], "ncluster": 5, "output": "", "ndim": "2D (press for 3D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1723538429.5136, "task_type": "clustering", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1723538428.9832, "finish": 1723538429.5136, "ip": "", "conv_id": "a5e9e96fdc6a47c8b7b2c09f6d2a2efd", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": ["metamorphic", "igneous", "sedimentary", "loafers", "sneakers", "loafers", "flats", "sandals", "boots", "high heels", "romance", "documentary", "drama", "thriller", "comedy", "tennis", "swimming", "volleyball", "cricket", "soccer", "basketball", "claustrophobia", "acrophobia", "nyctophobia", "arachnophobia", "agoraphobia", "ophidiophobia", "PlayStation", "Atari", "Xbox", "Nintendo"], "ncluster": 5, "output": "", "ndim": "2D (press for 3D)", "dim_method": "PCA", "clustering_method": "KMeans"}
data/retrieval_battle-f36c9f97-e795-4522-8eb7-7dee254b42c7.jsonl
CHANGED
@@ -7,3 +7,6 @@
{"tstamp": 1723536949.1762, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "6a48d1094f4c493faf4120db25717b7a", "0_model_name": "text-embedding-004", "0_prompt": "who played bailey in the sisterhood of the traveling pants", "0_output": [["who played bailey in the sisterhood of the traveling pants", "Bailee Madison\n\nBailee Madison Riley (born October 15, 1999) is an American actress, singer and songwriter. Born in Fort Lauderdale, Florida, Madison began acting at just two weeks of age, appearing mostly in television commercials for the first few years. Her film debut, as May Belle Aarons in the fantasy drama Bridge to Terabithia (2007), brought her early praise. She gained wider acclaim for starring in the war drama film Brothers (2009), for which she was nominated for the Critics' Choice Movie Award for Best Young Performer. Madison went on to star in a number of horror and thriller films, beginning with Don't Be Afraid of the Dark (2010), gaining a reputation as a scream queen. She had commercial success with starring roles in the comedies Just Go with It (2011) and Parental Guidance (2012), though the critical reception of these films was mostly negative.\nMadison transitioned to television in ensuing years, first with recurring roles in The Haunting Hour: The Series (20102012), as young Snow White in Once Upon a Time (2012–2016), and as a teenager with multiple mental health issues in the drama series The Fosters (20142015). She played the leading roles of Hillary Harrison in Trophy Wife (2013–2014) and Grace Russell in Good Witch (20152021). Madison gained further praise and resurged her status as a scream queen with her leading role as Imogen Adams in the Max slasher horror series Pretty Little Liars (2022present), a spinoff of the eponymous original series. She subsequently debuted her solo music career with the release of the pop single \"Kinda Fun\" (2024).\nEarly life\nBailee Madison Riley was born in Fort Lauderdale, Florida, to Patricia and Michael Riley. The youngest of seven children, she has four brothers and two sisters. Her elder sister, Kaitlin Vilasuso, is also an actress. Her father is Canadian, and through him Madison has citizenship by birth."]], "0_corpus": "wikipedia", "1_conv_id": "280813e8dbe54bb79beb4813a7e1937f", "1_model_name": "mixedbread-ai/mxbai-embed-large-v1", "1_prompt": "who played bailey in the sisterhood of the traveling pants", "1_output": [["who played bailey in the sisterhood of the traveling pants", "The Sisterhood of the Traveling Pants (film)\n\nThe Sisterhood of the Traveling Pants is a 2005 American coming-of-age comedy-drama film directed by Ken Kwapis from a screenplay by Delia Ephron and Elizabeth Chandler, based on the 2001 novel of the same name by Ann Brashares. It stars America Ferrera, Amber Tamblyn, Blake Lively, and Alexis Bledel. It follows four best friends who buy a mysterious pair of pants that fits each of them despite their differing sizes. They share the pants equally as they spend their first summer apart.\nIt was released in the United States on June 1, 2005 by Warner Bros. Pictures. A sequel, The Sisterhood of the Traveling Pants 2, was released on August 6, 2008. 
A third film is in development, as is a musical adaptation based on the first film.\nPlot\nLena Kaligaris (Alexis Bledel), Tibby Rollins (Amber Tamblyn), Carmen Lowell (America Ferrera), and Bridget Vreeland (Blake Lively) are teenagers from Bethesda, Maryland, who have been best friends their whole lives and are about to spend their first summer apart: Lena is visiting her grandparents in Santorini, Greece; Bridget is going to soccer camp in Baja California, Mexico; Carmen is visiting her father in South Carolina; and Tibby is staying home. While shopping together, they find a pair of jeans that inexplicably fit them all perfectly. They decide to share the jeans equally over the summer, before parting the next day."]], "1_corpus": "wikipedia"}
{"tstamp": 1723537572.0243, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "426586a862ef44ad8dc734ccbd06ded1", "0_model_name": "voyage-multilingual-2", "0_prompt": "In English when to use –, — vs -?", "0_output": [["In English when to use –, — vs -?", "English Stackexchange\n\nQ: What is the difference between `-` and `--` \nPossible Duplicate:\nWhen should I use an em-dash, an en-dash, and a hyphen? \n\nIs there any difference between:\n-\nand\n-- or in some cases, a long dash\nI have seen these two used lots of places but I really don't know the difference.\n\nA: There are three types of \"dashes\" in English: a hyphen (\"-\"), an en-dash (\"–\"), and an em-dash (\"—\"); to that you could also add a negative-sign or minus-sign from math.\nThe problem and confusion comes from not only knowing the difference, but trying to use them on a normal keyboard (assuming the font in use has the desired glyphs).\nIn typing class you may have learned to type -- for a dash, this would be an em-dash which is similar in usage to a comma. TeX users know to type --- to get an em-dash.\nThe en-dash is used between ranges, often numbers \"February 2–6\", but can also be \"Monday–Friday\". In TeX, you type -- to get an en-dash.\nAs Tom said, Microsoft Word has made a mess of this with its auto-correct settings.\n"]], "0_corpus": "stackexchange", "1_conv_id": "57679ddbe01546eb9b6a6c1fcd674611", "1_model_name": "Salesforce/SFR-Embedding-2_R", "1_prompt": "In English when to use –, — vs -?", "1_output": [["In English when to use –, — vs -?", "English Stackexchange\n\nQ: When to use -, – and —? \nPossible Duplicate:\nWhen should I use an em-dash, an en-dash, and a hyphen? \n\nThis is about hyphens (-), en-dashes (–) and em-dashes (—).\nWhen to use which one? To be honest, I always use em-dashes unless I join words with a hyphen, but I never use an en-dash.\n\nA: Hyphens are used in compound modifiers.\n\n\n*\n\n*a well-trained professional\n\n*over-the-counter drugs\n\n\nEn dashes are used in place of \"to\" to connect numbers or words.\n\n\n*\n\n*The London–Amsterdam flight is delayed.\n\n*The meeting is from 3:00–4:30 p.m.\n\n\nEm dashes help set off amplifying or explanatory statements.\n\n\n*\n\n*The vampire—it had been awake half the night—flew out of its coffin.\n\n"]], "1_corpus": "stackexchange"}
{"tstamp": 1723537919.0172, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "44e7efed02234d80b7cf4605374ea5ed", "0_model_name": "voyage-multilingual-2", "0_prompt": "A multilingual language model trained on a diverse set of languages and code, fine-tuned with safety instructions per the Biden-Harris Executive Order", "0_output": [["A multilingual language model trained on a diverse set of languages and code, fine-tuned with safety instructions per the Biden-Harris Executive Order", "Title: Aurora-M: The First Open Source Multilingual Language Model Red-teamed according to the U.S. Executive Order\n\nAbstract: Pretrained language models underpin several AI applications, but their high computational cost for training limits accessibility. Initiatives such as BLOOM and StarCoder aim to democratize access to pretrained models for collaborative community development. However, such existing models face challenges: limited multilingual capabilities, continual pretraining causing catastrophic forgetting, whereas pretraining from scratch is computationally expensive, and compliance with AI safety and development laws. This paper presents Aurora-M, a 15B parameter multilingual open-source model trained on English, Finnish, Hindi, Japanese, Vietnamese, and code. Continually pretrained from StarCoderPlus on 435 billion additional tokens, Aurora-M surpasses 2 trillion tokens in total training token count. It is the first open-source multilingual model fine-tuned on human-reviewed safety instructions, thus aligning its development not only with conventional red-teaming considerations, but also with the specific concerns articulated in the Biden-Harris Executive Order on the Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence. Aurora-M is rigorously evaluated across various tasks and languages, demonstrating robustness against catastrophic forgetting and outperforming alternatives in multilingual settings, particularly in safety evaluations. To promote responsible open-source LLM development, Aurora-M and its variants are released at https://huggingface.co/collections/aurora-m/aurora-m-models-65fdfdff62471e09812f5407 ."]], "0_corpus": "arxiv", "1_conv_id": "013c2023216845a4923dc5ce3a314431", "1_model_name": "jinaai/jina-embeddings-v2-base-en", "1_prompt": "A multilingual language model trained on a diverse set of languages and code, fine-tuned with safety instructions per the Biden-Harris Executive Order", "1_output": [["A multilingual language model trained on a diverse set of languages and code, fine-tuned with safety instructions per the Biden-Harris Executive Order", "Title: Study of a New Neuron\n\nAbstract: We study a modular neuron alternative to the McCulloch-Pitts neuron that arises naturally in analog devices in which the neuron inputs are represented as coherent oscillatory wave signals. Although the modular neuron can compute $XOR$ at the one neuron level, it is still characterized by the same Vapnik-Chervonenkis dimension as the standard neuron. We give the formulas needed for constructing networks using the new neuron and training them using back-propagation. A numerical study of the modular neuron on two data sets is presented, which demonstrates that the new neuron performs at least as well as the standard neuron."]], "1_corpus": "arxiv"}
{"tstamp": 1723538357.6471, "task_type": "retrieval", "type": "bothbadvote", "models": ["", ""], "ip": "", "0_conv_id": "7e2124c895704968b9ff68c140c18343", "0_model_name": "nomic-ai/nomic-embed-text-v1.5", "0_prompt": "does peanut butter dissolve in water?", "0_output": [["does peanut butter dissolve in water?", "Physics Stackexchange\n\nQ: Why does sugar dissolve faster in hot water compared to cold water? Why does sugar dissolve faster in hot water compared to cold water?\n\nA: I am going to have to respectfully disagree with Pranjal Rana. \nThe reason that sugar dissolves more quickly in hot water than in cold water has to do with the kinetic energy of the particles. \nThe faster (hotter) the water molecules are moving, the more energy they give to the sugar molecules when they collide with them. \nThink about it this way: a sugar crystal is just a lot of sugar molecules stuck together. You dissolve the crystal you need to hit the molecules with enough force to break them off of the rest of the crystal. \n"]], "0_corpus": "stackexchange", "1_conv_id": "5bdbad10a4b04ff593ed47628d0fa886", "1_model_name": "intfloat/e5-mistral-7b-instruct", "1_prompt": "does peanut butter dissolve in water?", "1_output": [["does peanut butter dissolve in water?", "Physics Stackexchange\n\nQ: Will a single atom of an element with greater atomic mass than the $\\rm H_2O$ molecule sink in a cup of water? Will a single atom of an element with greater atomic mass than the H2O molecule sink in a cup of water? \nA single atom or molecule of a substance is placed in a cup of water. Because of its specific gravity, I suppose it should sink. However, I have never heard about a colloidal system with a single molecule or element.\n\nA: Like dissolving cesium bromide in water? Entropy (the larger number of accessible microstates) makes atoms in a crystal on the bottom go in solution.\n"]], "1_corpus": "stackexchange"}
{"tstamp": 1723538412.7974, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "53559f96b25e482586c266fb7e244bcf", "0_model_name": "voyage-multilingual-2", "0_prompt": "Who is the president of israel?", "0_output": [["Who is the president of israel?", "President of Israel\n\nThe president of the State of Israel (, or ) is the head of state of Israel. The position is largely a ceremonial role, with executive power vested in the cabinet led by the prime minister. The incumbent president is Isaac Herzog, who took office on 7 July 2021. Presidents are elected by the Knesset for a single seven-year term."]], "0_corpus": "wikipedia", "1_conv_id": "43e9cb7043ee41eead9c91b0688a99fa", "1_model_name": "jinaai/jina-embeddings-v2-base-en", "1_prompt": "Who is the president of israel?", "1_output": [["Who is the president of israel?", "President of Israel\n\nOther activities The president confers the dignity of Righteous Among the Nations on the recommendation of Yad Vashem, presents the Wolf Prize on the recommendation of the Wolf Foundation, and awards the Presidential Medal of Honor, the President's Fund for Outstanding Doctoral Students, and the Presidential Award for Volunteerism in his absolute discretion. The President also participates in the awarding of the Israel Prize, which is held annually on Yom Ha'atzmaut. Moreover, the president serves as the keynote speaker at the opening ceremonies of the half-yearly Knesset conference, as well as at the annual official ceremonies for Yom Hazikaron and Yom HaShoah.\nPresidential backgrounds\nMost Israeli presidents were involved in national politics or Zionist activities before taking office. Some were also distinguished in other fields. For example, Chaim Weizmann was a leading research chemist who founded the Weizmann Institute of Science in Rehovot; Zalman Shazar was an author, poet, and journalist; and Chaim Herzog was a military leader, attorney, and diplomat.\nThe first Israeli presidents were born in the former Russian Empire. The first native-born president, as well as the first with a Sephardi background, was Yitzhak Navon. The first president with a Western European background was Chaim Herzog, who originally came from Belfast, United Kingdom. The first president with a Mizrahi background was Moshe Katsav, who was born in Iran.\nThe first president to be born in the modern state after Israel's declaration of independence is Isaac Herzog. He is also the first son of a former Israeli president to also become president. As of 2024, the only other presidents with close family ties were Chaim Weizmann and his nephew Ezer Weizman."]], "1_corpus": "wikipedia"}
{"tstamp": 1723538468.5017, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "c84f0720cf994dcabe87b3593a1155c0", "0_model_name": "GritLM/GritLM-7B", "0_prompt": "Who is the president of the Republic of Iran?", "0_output": [["Who is the president of the Republic of Iran?", "President of Iran\n\nThe most recent president of Iran was Ebrahim Raisi. He succeeded Hassan Rouhani, who served eight years in office from 2013 to 2021. On May 19, 2024, a helicopter carrying Raisi crashed in the East Azerbaijan Province of Iran. There were no survivors at the crash site. Raisi was the second president of Iran to have died in office. Taghi Rahmani, the husband of detained activist and Nobel laureate Narges Mohammadi, said Raisi's death would not structurally change the Iranian leadership under Khamenei. Following his death, first vice president Mohammad Mokhber was designated as acting president until new elections could be held on 28 June.\nTIME Magazine noted that presidential elections in Iran change nothing as Supreme Leader Khamenei — and not the President — wields the ultimate power. Tallha Abdulrazaq, an Iraqi researcher at the University of Exeter's Strategy and Security Institute, stated that Khamenei, the longtime Supreme Leader of Iran, always uses the president as a kind of a buffer zone between him and the people. \"Anything that goes right, Khamenei then can say 'I am the wise leader who put this guy in charge and he made the right policy decisions.' Anything that goes wrong, he can say 'we should get rid of this guy. He is not good for the country, he is not good for you.'\""]], "0_corpus": "wikipedia", "1_conv_id": "0275c79f3a50430e956eb19abb10424e", "1_model_name": "nomic-ai/nomic-embed-text-v1.5", "1_prompt": "Who is the president of the Republic of Iran?", "1_output": [["Who is the president of the Republic of Iran?", "President of Iran\n\nThe president of Iran () is the head of government of the Islamic Republic of Iran and the second highest-ranking official, after the Supreme Leader. The first election was held in 1980 and was won by Abulhassan Banisadr. Mohammad Mokhber currently serves as the acting President of Iran following the death of Ebrahim Raisi, the former president, on 19 May 2024. He is expected to be succeeded by Masoud Pezeshkian on July 30, who won the 2024 Iranian presidential election.\nHistory\nAfter the Iranian Revolution of 1979 and 1979 Iranian Islamic Republic referendum on March 29 and 30, the new government needed to craft a new constitution. Supreme Leader Ruhollah Khomeini, ordered an election for the Assembly of Experts, the body tasked with writing the constitution. The assembly presented the constitution on October 24, 1979, and Supreme Leader Ruhollah Khomeini and Prime Minister Mehdi Bazargan approved it."]], "1_corpus": "wikipedia"}
data/retrieval_individual-f36c9f97-e795-4522-8eb7-7dee254b42c7.jsonl
CHANGED
@@ -40,3 +40,13 @@
{"tstamp": 1723537458.003, "task_type": "retrieval", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1723537457.6315, "finish": 1723537458.003, "ip": "", "conv_id": "57679ddbe01546eb9b6a6c1fcd674611", "model_name": "Salesforce/SFR-Embedding-2_R", "prompt": "In English when to use –, — vs -?", "output": [["In English when to use –, — vs -?", "English Stackexchange\n\nQ: When to use -, – and —? \nPossible Duplicate:\nWhen should I use an em-dash, an en-dash, and a hyphen? \n\nThis is about hyphens (-), en-dashes (–) and em-dashes (—).\nWhen to use which one? To be honest, I always use em-dashes unless I join words with a hyphen, but I never use an en-dash.\n\nA: Hyphens are used in compound modifiers.\n\n\n*\n\n*a well-trained professional\n\n*over-the-counter drugs\n\n\nEn dashes are used in place of \"to\" to connect numbers or words.\n\n\n*\n\n*The London–Amsterdam flight is delayed.\n\n*The meeting is from 3:00–4:30 p.m.\n\n\nEm dashes help set off amplifying or explanatory statements.\n\n\n*\n\n*The vampire—it had been awake half the night—flew out of its coffin.\n\n"]], "corpus": "stackexchange"}
{"tstamp": 1723537809.1696, "task_type": "retrieval", "type": "chat", "model": "voyage-multilingual-2", "gen_params": {}, "start": 1723537808.7709, "finish": 1723537809.1696, "ip": "", "conv_id": "44e7efed02234d80b7cf4605374ea5ed", "model_name": "voyage-multilingual-2", "prompt": "A multilingual language model trained on a diverse set of languages and code, fine-tuned with safety instructions per the Biden-Harris Executive Order", "output": [["A multilingual language model trained on a diverse set of languages and code, fine-tuned with safety instructions per the Biden-Harris Executive Order", "Title: Aurora-M: The First Open Source Multilingual Language Model Red-teamed according to the U.S. Executive Order\n\nAbstract: Pretrained language models underpin several AI applications, but their high computational cost for training limits accessibility. Initiatives such as BLOOM and StarCoder aim to democratize access to pretrained models for collaborative community development. However, such existing models face challenges: limited multilingual capabilities, continual pretraining causing catastrophic forgetting, whereas pretraining from scratch is computationally expensive, and compliance with AI safety and development laws. This paper presents Aurora-M, a 15B parameter multilingual open-source model trained on English, Finnish, Hindi, Japanese, Vietnamese, and code. Continually pretrained from StarCoderPlus on 435 billion additional tokens, Aurora-M surpasses 2 trillion tokens in total training token count. It is the first open-source multilingual model fine-tuned on human-reviewed safety instructions, thus aligning its development not only with conventional red-teaming considerations, but also with the specific concerns articulated in the Biden-Harris Executive Order on the Safe, Secure, and Trustworthy Development and Use of Artificial Intelligence. Aurora-M is rigorously evaluated across various tasks and languages, demonstrating robustness against catastrophic forgetting and outperforming alternatives in multilingual settings, particularly in safety evaluations. To promote responsible open-source LLM development, Aurora-M and its variants are released at https://huggingface.co/collections/aurora-m/aurora-m-models-65fdfdff62471e09812f5407 ."]], "corpus": "arxiv"}
{"tstamp": 1723537809.1696, "task_type": "retrieval", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1723537808.7709, "finish": 1723537809.1696, "ip": "", "conv_id": "013c2023216845a4923dc5ce3a314431", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": "A multilingual language model trained on a diverse set of languages and code, fine-tuned with safety instructions per the Biden-Harris Executive Order", "output": [["A multilingual language model trained on a diverse set of languages and code, fine-tuned with safety instructions per the Biden-Harris Executive Order", "Title: Study of a New Neuron\n\nAbstract: We study a modular neuron alternative to the McCulloch-Pitts neuron that arises naturally in analog devices in which the neuron inputs are represented as coherent oscillatory wave signals. Although the modular neuron can compute $XOR$ at the one neuron level, it is still characterized by the same Vapnik-Chervonenkis dimension as the standard neuron. We give the formulas needed for constructing networks using the new neuron and training them using back-propagation. A numerical study of the modular neuron on two data sets is presented, which demonstrates that the new neuron performs at least as well as the standard neuron."]], "corpus": "arxiv"}
{"tstamp": 1723538324.8573, "task_type": "retrieval", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1723538324.635, "finish": 1723538324.8573, "ip": "", "conv_id": "7e2124c895704968b9ff68c140c18343", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": "does peanut butter dissolve in water?", "output": [["does peanut butter dissolve in water?", "Physics Stackexchange\n\nQ: Why does sugar dissolve faster in hot water compared to cold water? Why does sugar dissolve faster in hot water compared to cold water?\n\nA: I am going to have to respectfully disagree with Pranjal Rana. \nThe reason that sugar dissolves more quickly in hot water than in cold water has to do with the kinetic energy of the particles. \nThe faster (hotter) the water molecules are moving, the more energy they give to the sugar molecules when they collide with them. \nThink about it this way: a sugar crystal is just a lot of sugar molecules stuck together. You dissolve the crystal you need to hit the molecules with enough force to break them off of the rest of the crystal. \n"]], "corpus": "stackexchange"}
{"tstamp": 1723538324.8573, "task_type": "retrieval", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1723538324.635, "finish": 1723538324.8573, "ip": "", "conv_id": "5bdbad10a4b04ff593ed47628d0fa886", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": "does peanut butter dissolve in water?", "output": [["does peanut butter dissolve in water?", "Physics Stackexchange\n\nQ: Will a single atom of an element with greater atomic mass than the $\\rm H_2O$ molecule sink in a cup of water? Will a single atom of an element with greater atomic mass than the H2O molecule sink in a cup of water? \nA single atom or molecule of a substance is placed in a cup of water. Because of its specific gravity, I suppose it should sink. However, I have never heard about a colloidal system with a single molecule or element.\n\nA: Like dissolving cesium bromide in water? Entropy (the larger number of accessible microstates) makes atoms in a crystal on the bottom go in solution.\n"]], "corpus": "stackexchange"}
{"tstamp": 1723538351.2901, "task_type": "retrieval", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1723538350.6825, "finish": 1723538351.2901, "ip": "", "conv_id": "4fd2c8ef5c1147c78754f5d8ab63a08f", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": "Who is the president of israel?", "output": [["Who is the president of israel?", "Meta Stackexchange\n\nQ: Why is the JavaScript file that deal with Winter Bash hats called \"hakovaim.js\"? Looking through Network tab on Chrome developer tools, I noticed a file called \"hakovaim.js\" and in it seems to be code that controls the Winter Bash hats.\nWhy was this name chosen? The word \"hakovaim\" means \"the hats\" in Hebrew. I was wondering if this was created by a Hebrew speaking person, or is that just a coincidence?\nAnd if it is the Hebrew word, then why would that be used on this file name instead of a more useful English word?\n(Hopefully the person who actually created that file will see this question, but if anyone else knows, I am really curious about this.)\n\nA: Winter Bash 2020 was largely created by Yaakov Ellis, who lives in Israel and does indeed speak Hebrew.\nHe probably included the name as a sort of Easter Egg.\nולמה שלא יעשה את זה?\n"]], "corpus": "stackexchange"}
{"tstamp": 1723538351.2901, "task_type": "retrieval", "type": "chat", "model": "BM25", "gen_params": {}, "start": 1723538350.6825, "finish": 1723538351.2901, "ip": "", "conv_id": "832b584372e149c38fffe85ad5afd5d9", "model_name": "BM25", "prompt": "Who is the president of israel?", "output": [["Who is the president of israel?", "Ell Stackexchange\n\nQ: What does it mean by \"asleep at the watch\"? What does it mean by \"asleep at the watch\"?\n\nThe childish awe that accompanied the president stands in unfortunate contrast to the indifference greeting the next state visit to Israel. Unlike the Trump visit, this one will mark an historic occasion: the first-ever by the prime minister of this country. This individual presides over an economy that is growing at a dizzying pace and could one day emerge as the world’s largest. Yet days before the arrival of the world's most important prime minister – India's Narendra Modi – Israel is asleep at the watch. (Meirav Arlosoroff / Analysis)\n\nHaaretz: India's Narendra Modi Visits Israel, Sees Israeli Desalination Tech at Beach With Netanyahu\n\nA: The problem here is that the person who wrote this combined two different sayings. This is a very common mistake made by English speaking people today. One saying is: Falling asleep at the wheel—meaning falling asleep at the steering wheel, while driving. The other is: Sleeping on watch—meaning falling asleep while on guard duty, the night watch. Hence, asleep at the wheel, and sleeping on watch—asleep at the watch.\n"]], "corpus": "stackexchange"}
{"tstamp": 1723538391.3737, "task_type": "retrieval", "type": "chat", "model": "voyage-multilingual-2", "gen_params": {}, "start": 1723538387.8997, "finish": 1723538391.3737, "ip": "", "conv_id": "53559f96b25e482586c266fb7e244bcf", "model_name": "voyage-multilingual-2", "prompt": "Who is the president of israel?", "output": [["Who is the president of israel?", "President of Israel\n\nThe president of the State of Israel (, or ) is the head of state of Israel. The position is largely a ceremonial role, with executive power vested in the cabinet led by the prime minister. The incumbent president is Isaac Herzog, who took office on 7 July 2021. Presidents are elected by the Knesset for a single seven-year term."]], "corpus": "wikipedia"}
{"tstamp": 1723538391.3737, "task_type": "retrieval", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1723538387.8997, "finish": 1723538391.3737, "ip": "", "conv_id": "43e9cb7043ee41eead9c91b0688a99fa", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": "Who is the president of israel?", "output": [["Who is the president of israel?", "President of Israel\n\nOther activities The president confers the dignity of Righteous Among the Nations on the recommendation of Yad Vashem, presents the Wolf Prize on the recommendation of the Wolf Foundation, and awards the Presidential Medal of Honor, the President's Fund for Outstanding Doctoral Students, and the Presidential Award for Volunteerism in his absolute discretion. The President also participates in the awarding of the Israel Prize, which is held annually on Yom Ha'atzmaut. Moreover, the president serves as the keynote speaker at the opening ceremonies of the half-yearly Knesset conference, as well as at the annual official ceremonies for Yom Hazikaron and Yom HaShoah.\nPresidential backgrounds\nMost Israeli presidents were involved in national politics or Zionist activities before taking office. Some were also distinguished in other fields. For example, Chaim Weizmann was a leading research chemist who founded the Weizmann Institute of Science in Rehovot; Zalman Shazar was an author, poet, and journalist; and Chaim Herzog was a military leader, attorney, and diplomat.\nThe first Israeli presidents were born in the former Russian Empire. The first native-born president, as well as the first with a Sephardi background, was Yitzhak Navon. The first president with a Western European background was Chaim Herzog, who originally came from Belfast, United Kingdom. The first president with a Mizrahi background was Moshe Katsav, who was born in Iran.\nThe first president to be born in the modern state after Israel's declaration of independence is Isaac Herzog. He is also the first son of a former Israeli president to also become president. As of 2024, the only other presidents with close family ties were Chaim Weizmann and his nephew Ezer Weizman."]], "corpus": "wikipedia"}
{"tstamp": 1723538439.397, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1723538439.1559, "finish": 1723538439.397, "ip": "", "conv_id": "c84f0720cf994dcabe87b3593a1155c0", "model_name": "GritLM/GritLM-7B", "prompt": "Who is the president of the Republic of Iran?", "output": [["Who is the president of the Republic of Iran?", "President of Iran\n\nThe most recent president of Iran was Ebrahim Raisi. He succeeded Hassan Rouhani, who served eight years in office from 2013 to 2021. On May 19, 2024, a helicopter carrying Raisi crashed in the East Azerbaijan Province of Iran. There were no survivors at the crash site. Raisi was the second president of Iran to have died in office. Taghi Rahmani, the husband of detained activist and Nobel laureate Narges Mohammadi, said Raisi's death would not structurally change the Iranian leadership under Khamenei. Following his death, first vice president Mohammad Mokhber was designated as acting president until new elections could be held on 28 June.\nTIME Magazine noted that presidential elections in Iran change nothing as Supreme Leader Khamenei — and not the President — wields the ultimate power. Tallha Abdulrazaq, an Iraqi researcher at the University of Exeter's Strategy and Security Institute, stated that Khamenei, the longtime Supreme Leader of Iran, always uses the president as a kind of a buffer zone between him and the people. \"Anything that goes right, Khamenei then can say 'I am the wise leader who put this guy in charge and he made the right policy decisions.' Anything that goes wrong, he can say 'we should get rid of this guy. He is not good for the country, he is not good for you.'\""]], "corpus": "wikipedia"}
{"tstamp": 1723538439.397, "task_type": "retrieval", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1723538439.1559, "finish": 1723538439.397, "ip": "", "conv_id": "0275c79f3a50430e956eb19abb10424e", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": "Who is the president of the Republic of Iran?", "output": [["Who is the president of the Republic of Iran?", "President of Iran\n\nThe president of Iran () is the head of government of the Islamic Republic of Iran and the second highest-ranking official, after the Supreme Leader. The first election was held in 1980 and was won by Abulhassan Banisadr. Mohammad Mokhber currently serves as the acting President of Iran following the death of Ebrahim Raisi, the former president, on 19 May 2024. He is expected to be succeeded by Masoud Pezeshkian on July 30, who won the 2024 Iranian presidential election.\nHistory\nAfter the Iranian Revolution of 1979 and 1979 Iranian Islamic Republic referendum on March 29 and 30, the new government needed to craft a new constitution. Supreme Leader Ruhollah Khomeini, ordered an election for the Assembly of Experts, the body tasked with writing the constitution. The assembly presented the constitution on October 24, 1979, and Supreme Leader Ruhollah Khomeini and Prime Minister Mehdi Bazargan approved it."]], "corpus": "wikipedia"}
{"tstamp": 1723538497.3742, "task_type": "retrieval", "type": "chat", "model": "text-embedding-004", "gen_params": {}, "start": 1723538495.0551, "finish": 1723538497.3742, "ip": "", "conv_id": "1d334bdb18ee4fd2a55421cf55424d13", "model_name": "text-embedding-004", "prompt": "What is LORA for model fine tuning?", "output": [["What is LORA for model fine tuning?", "Fine-tuning (deep learning)\n\nLow-rank adaptation Low-rank adaptation (LoRA) is an adapter-based technique for efficiently fine-tuning models. The basic idea is to design a low-rank matrix that is then added to the original matrix. An adapter, in this context, is a collection of low-rank matrices which, when added to a base model, produces a fine-tuned model. It allows for performance that approaches full-model fine-tuning with less space requirement. A language model with billions of parameters may be LoRA fine-tuned with only several millions of parameters.\nLoRA-based fine-tuning has become popular in the Stable Diffusion community. Support for LoRA was integrated into the Diffusers library from Hugging Face. Support for LoRA and similar techniques is also available for a wide range of other models through Hugging Face's Parameter-Efficient Fine-Tuning (PEFT) package.\nRepresentation fine-tuning\nRepresentation fine-tuning (ReFT) is a novel technique developed by researchers at Stanford University aimed at fine-tuning large language models (LLMs) by modifying less than 1% of their representations. Unlike traditional parameter-efficient fine-tuning (PEFT) methods, which mainly focus on updating weights, ReFT targets specific parts of the model relevant to the task being fine-tuned. This approach is based on the understanding that deep learning models encode rich semantic information in their representations, suggesting that modifying representations might be a more effective strategy than updating weights."]], "corpus": "wikipedia"}
{"tstamp": 1723538497.3742, "task_type": "retrieval", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1723538495.0551, "finish": 1723538497.3742, "ip": "", "conv_id": "fe4923c1db634e9cbbb91d98232abbaa", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": "What is LORA for model fine tuning?", "output": [["What is LORA for model fine tuning?", "Fine-tuning (deep learning)\n\nLow-rank adaptation Low-rank adaptation (LoRA) is an adapter-based technique for efficiently fine-tuning models. The basic idea is to design a low-rank matrix that is then added to the original matrix. An adapter, in this context, is a collection of low-rank matrices which, when added to a base model, produces a fine-tuned model. It allows for performance that approaches full-model fine-tuning with less space requirement. A language model with billions of parameters may be LoRA fine-tuned with only several millions of parameters.\nLoRA-based fine-tuning has become popular in the Stable Diffusion community. Support for LoRA was integrated into the Diffusers library from Hugging Face. Support for LoRA and similar techniques is also available for a wide range of other models through Hugging Face's Parameter-Efficient Fine-Tuning (PEFT) package.\nRepresentation fine-tuning\nRepresentation fine-tuning (ReFT) is a novel technique developed by researchers at Stanford University aimed at fine-tuning large language models (LLMs) by modifying less than 1% of their representations. Unlike traditional parameter-efficient fine-tuning (PEFT) methods, which mainly focus on updating weights, ReFT targets specific parts of the model relevant to the task being fine-tuned. This approach is based on the understanding that deep learning models encode rich semantic information in their representations, suggesting that modifying representations might be a more effective strategy than updating weights."]], "corpus": "wikipedia"}