Muennighoff committed: Scheduled Commit
data/clustering_battle-b9229914-47bc-4da8-a21b-89329fff8207.jsonl
ADDED
@@ -0,0 +1 @@
+{"tstamp": 1722310865.1737, "task_type": "clustering", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "27540354989c4194bafd0930900e26c7", "0_model_name": "intfloat/multilingual-e5-large-instruct", "0_prompt": ["topaz", "amethyst", "diamond", "emerald", "ruby", "opal", "elm", "oak", "cedar", "maple", "mackerel", "tuna", "orchid", "rose", "daisy", "lily", "sunflower"], "0_ncluster": 4, "0_output": "", "0_ndim": "3D (press for 2D)", "0_dim_method": "PCA", "0_clustering_method": "KMeans", "1_conv_id": "2ece1304a5c040e69507bc9c237d0712", "1_model_name": "intfloat/multilingual-e5-large-instruct", "1_prompt": ["topaz", "amethyst", "diamond", "emerald", "ruby", "opal", "elm", "oak", "cedar", "maple", "mackerel", "tuna", "orchid", "rose", "daisy", "lily", "sunflower"], "1_ncluster": 4, "1_output": "", "1_ndim": "3D (press for 2D)", "1_dim_method": "PCA", "1_clustering_method": "KMeans"}
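Each line in a battle file is one self-contained JSON record: a `type` field carrying the vote (`tievote` above; `leftvote`, `rightvote`, and `bothbadvote` also appear in the retrieval file below) plus `0_`/`1_`-prefixed fields describing the two anonymized sides. A minimal sketch for tallying votes from such a file, using only field names visible in the records in this commit:

```python
import json
from collections import Counter

def tally_votes(path):
    """Count (model pair, vote type) occurrences in a battle JSONL file."""
    counts = Counter()
    with open(path, encoding="utf-8") as f:
        for line in f:
            if not line.strip():
                continue  # skip blank lines defensively
            rec = json.loads(line)
            pair = (rec["0_model_name"], rec["1_model_name"])
            counts[pair, rec["type"]] += 1
    return counts

# e.g. tally_votes("data/clustering_battle-b9229914-47bc-4da8-a21b-89329fff8207.jsonl")
```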
data/clustering_individual-b9229914-47bc-4da8-a21b-89329fff8207.jsonl
CHANGED
@@ -20,3 +20,5 @@
{"tstamp": 1722309488.9581, "task_type": "clustering", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1722309488.869, "finish": 1722309488.9581, "ip": "", "conv_id": "3be1622cdbfc466e89a86f79a6119eb3", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": ["rooibos", "pu-erh", "black", "chamomile", "oolong", "green", "willow", "maple", "birch", "oak", "conscientiousness", "openness", "neuroticism"], "ncluster": 3, "output": "", "ndim": "2D (press for 3D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722309491.4668, "task_type": "clustering", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1722309491.3788, "finish": 1722309491.4668, "ip": "", "conv_id": "f8359dda2c804fb18c356875be073109", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": ["rooibos", "pu-erh", "black", "chamomile", "oolong", "green", "willow", "maple", "birch", "oak", "conscientiousness", "openness", "neuroticism"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722309491.4668, "task_type": "clustering", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1722309491.3788, "finish": 1722309491.4668, "ip": "", "conv_id": "3be1622cdbfc466e89a86f79a6119eb3", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": ["rooibos", "pu-erh", "black", "chamomile", "oolong", "green", "willow", "maple", "birch", "oak", "conscientiousness", "openness", "neuroticism"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+{"tstamp": 1722310847.2603, "task_type": "clustering", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1722310847.1808, "finish": 1722310847.2603, "ip": "", "conv_id": "27540354989c4194bafd0930900e26c7", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": ["topaz", "amethyst", "diamond", "emerald", "ruby", "opal", "elm", "oak", "cedar", "maple", "mackerel", "tuna", "orchid", "rose", "daisy", "lily", "sunflower"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+{"tstamp": 1722310847.2603, "task_type": "clustering", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1722310847.1808, "finish": 1722310847.2603, "ip": "", "conv_id": "2ece1304a5c040e69507bc9c237d0712", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": ["topaz", "amethyst", "diamond", "emerald", "ruby", "opal", "elm", "oak", "cedar", "maple", "mackerel", "tuna", "orchid", "rose", "daisy", "lily", "sunflower"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
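The individual clustering records log run parameters (`ncluster`, `dim_method`: PCA, `clustering_method`: KMeans, and a 2D/3D toggle in `ndim`) but not the pipeline itself. A speculative sketch of what those parameters imply; `embed` is a hypothetical stand-in for the embedding model named in the record, and clustering on the projected coordinates rather than the raw embeddings is an assumption:

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

def cluster_words(words, embed, ncluster, ndim=3):
    """Embed each word, project with PCA (dim_method), cluster with KMeans."""
    vecs = np.asarray([embed(w) for w in words])         # embed() is hypothetical
    coords = PCA(n_components=ndim).fit_transform(vecs)  # 2D/3D plot coordinates
    # Assumption: KMeans runs on the projected coordinates; the arena may
    # equally well cluster the raw embeddings before projection.
    labels = KMeans(n_clusters=ncluster, n_init=10).fit_predict(coords)
    return coords, labels
```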
data/retrieval_battle-b9229914-47bc-4da8-a21b-89329fff8207.jsonl
CHANGED
@@ -9,3 +9,9 @@
{"tstamp": 1722310291.9285, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "9776843ed00d4cab89c0894cad832500", "0_model_name": "sentence-transformers/all-MiniLM-L6-v2", "0_prompt": "what are chapters in the bible called?", "0_output": [["what are chapters in the bible called?", "Stackoverflow Stackexchange\n\nQ: How to strip executables thoroughly I'd like to strip as much as I can - on Linux: an ELF. I only want in there the stuff I need to run it.\nI tried using strip:\nstrip --strip-all elf\n\nBut it doesn't seem to do a good job: nm still displays lots of stuff, and the binary is still big.\nWhat should I do?\n\nA: I would check out this great article that goes into depth on making an ELF executable as small as possible. Maybe it has info that could help!\nhttp://www.muppetlabs.com/~breadbox/software/tiny/teensy.html\n"]], "0_corpus": "stackexchange", "1_conv_id": "87810785bece4d6dab306293a64980f2", "1_model_name": "BAAI/bge-large-en-v1.5", "1_prompt": "what are chapters in the bible called?", "1_output": [["what are chapters in the bible called?", "English Stackexchange\n\nQ: Why are the divisions of the Bible called \"verses\"? In common parlance, a verse is a writing arranged with a metrical rhythm, typically having a rhyme. Most of the books of Bible contain prose which do not follow metrical rhythm or rhyme. But their sentences are called verses. My question therefore is: Why are the divisions of the Bible called verses?\n\nA: A number of questions on Stack Exchange - English Language and Usage have commented on the words 'stanza' and 'verse' in relation to poetry and to scripture.\n'Verses' are also used in Shakespeare.\nMerriam Webster defines a 'verse' in three ways : metrical verse, a stanza or a biblical verse.\nIn any work which requires to be referenced and quoted as part of its function, stanzas or verses are numbered accordingly so that they can be cited.\n"]], "1_corpus": "stackexchange"}
{"tstamp": 1722310355.5437, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "256e5bb6399a4ba7b599c9ba9a402e95", "0_model_name": "embed-english-v3.0", "0_prompt": "Learning word representations to improve IR tasks like query expansion and classification", "0_output": [["Learning word representations to improve IR tasks like query expansion and classification", "Title: Relevance-based Word Embedding\n\nAbstract: Learning a high-dimensional dense representation for vocabulary terms, also known as a word embedding, has recently attracted much attention in natural language processing and information retrieval tasks. The embedding vectors are typically learned based on term proximity in a large corpus. This means that the objective in well-known word embedding algorithms, e.g., word2vec, is to accurately predict adjacent word(s) for a given word or context. However, this objective is not necessarily equivalent to the goal of many information retrieval (IR) tasks. The primary objective in various IR tasks is to capture relevance instead of term proximity, syntactic, or even semantic similarity. This is the motivation for developing unsupervised relevance-based word embedding models that learn word representations based on query-document relevance information. In this paper, we propose two learning models with different objective functions; one learns a relevance distribution over the vocabulary set for each query, and the other classifies each term as belonging to the relevant or non-relevant class for each query. To train our models, we used over six million unique queries and the top ranked documents retrieved in response to each query, which are assumed to be relevant to the query. We extrinsically evaluate our learned word representation models using two IR tasks: query expansion and query classification. Both query expansion experiments on four TREC collections and query classification experiments on the KDD Cup 2005 dataset suggest that the relevance-based word embedding models significantly outperform state-of-the-art proximity-based embedding models, such as word2vec and GloVe."]], "0_corpus": "arxiv", "1_conv_id": "2f7dc574c8b94ae7a397af80c4cf05f5", "1_model_name": "mixedbread-ai/mxbai-embed-large-v1", "1_prompt": "Learning word representations to improve IR tasks like query expansion and classification", "1_output": [["Learning word representations to improve IR tasks like query expansion and classification", "Title: Relevance-based Word Embedding\n\nAbstract: Learning a high-dimensional dense representation for vocabulary terms, also known as a word embedding, has recently attracted much attention in natural language processing and information retrieval tasks. The embedding vectors are typically learned based on term proximity in a large corpus. This means that the objective in well-known word embedding algorithms, e.g., word2vec, is to accurately predict adjacent word(s) for a given word or context. However, this objective is not necessarily equivalent to the goal of many information retrieval (IR) tasks. The primary objective in various IR tasks is to capture relevance instead of term proximity, syntactic, or even semantic similarity. This is the motivation for developing unsupervised relevance-based word embedding models that learn word representations based on query-document relevance information. 
In this paper, we propose two learning models with different objective functions; one learns a relevance distribution over the vocabulary set for each query, and the other classifies each term as belonging to the relevant or non-relevant class for each query. To train our models, we used over six million unique queries and the top ranked documents retrieved in response to each query, which are assumed to be relevant to the query. We extrinsically evaluate our learned word representation models using two IR tasks: query expansion and query classification. Both query expansion experiments on four TREC collections and query classification experiments on the KDD Cup 2005 dataset suggest that the relevance-based word embedding models significantly outperform state-of-the-art proximity-based embedding models, such as word2vec and GloVe."]], "1_corpus": "arxiv"}
{"tstamp": 1722310388.2572, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "8c01a7990c7d433c8f9c9a38ccfd45b3", "0_model_name": "voyage-multilingual-2", "0_prompt": "who starred in the film far from the madding crowd", "0_output": [["who starred in the film far from the madding crowd", "Far from the Madding Crowd (2015 film)\n\nPlot The film is set in 1870 in Britain. While working on her aunt's farm in Dorset, Bathsheba Everdene (Carey Mulligan) meets neighbouring farmer, Gabriel Oak (Matthias Schoenaerts). He eventually proposes, but the headstrong Bathsheba declines, saying she is too independent. One night, Gabriel's new sheepdog herds his entire flock off a steep cliff. Penniless, Gabriel leaves to search for work. In contrast, Bathsheba inherits an uncle's farm and leaves to run it.\nWhile at a fair trying to find employment, Gabriel sees recruiting officers. A girl there, Fanny Robbin, points out one soldier, Sergeant Frank Troy, her sweetheart. She suggests Gabriel seek employment at a farm in Weatherbury. Upon Gabriel's arrival, several buildings are on fire. He saves the barn from destruction. At dawn the next day, he meets the farm's new mistress, Bathsheba, who hires him as a shepherd.\nWhile in town trading her seed, Bathsheba sees her neighbour William Boldwood (Michael Sheen), a prosperous and mature bachelor. Bathsheba sends Boldwood a Valentine as a joke. He is both offended and intrigued. In the meantime, Fanny goes to the wrong church for her wedding; Troy, believing he's been jilted, is devastated. Boldwood comes to speak to Bathsheba, sees her handling sheep, and is charmed. He invites her to his home, where he proposes marriage. Bathsheba delays giving him a final answer, and Gabriel admonishes her for toying with Boldwood's affections. Stung by the criticism, she fires him. The next day, a crisis arises with the sheep that only Gabriel can manage. She successfully persuades him to return."]], "0_corpus": "wikipedia", "1_conv_id": "2dafecf528e44c34a578dd2bad99f101", "1_model_name": "GritLM/GritLM-7B", "1_prompt": "who starred in the film far from the madding crowd", "1_output": [["who starred in the film far from the madding crowd", "Far from the Madding Crowd (2015 film)\n\nFar from the Madding Crowd is a 2015 British romantic drama film directed by Thomas Vinterberg and starring Carey Mulligan, Matthias Schoenaerts, Tom Sturridge, Michael Sheen, and Juno Temple. An adaptation by David Nicholls of the 1874 novel Far from the Madding Crowd by Thomas Hardy, it is the fourth film adaptation of the novel."]], "1_corpus": "wikipedia"}
+{"tstamp": 1722310737.3362, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "0494368f367f4ce891d1645f49397dce", "0_model_name": "text-embedding-004", "0_prompt": "what happened to sokka after the last airbender", "0_output": [["what happened to sokka after the last airbender", "Sokka\n\nSokka is a fictional character in Nickelodeon's animated television series Avatar: The Last Airbender and its sequel series The Legend of Korra. The character, created by Michael Dante DiMartino and Bryan Konietzko, is voiced by Jack DeSena in the original series and by Chris Hardwick in the sequel series. He is a warrior of the Water Tribe and the son of Chief Hakoda and Kya. In the live-action film adaptation, he is portrayed by Jackson Rathbone, while in the live-action television series adaptation, he is portrayed by Ian Ousley."]], "0_corpus": "wikipedia", "1_conv_id": "2a889447fde54cc5b596b7983ab9b19e", "1_model_name": "GritLM/GritLM-7B", "1_prompt": "what happened to sokka after the last airbender", "1_output": [["what happened to sokka after the last airbender", "List of Avatar: The Last Airbender characters\n\nSokka Sokka () (voiced by Jack DeSena in the original series, Chris Hardwick in the sequel series, portrayed by Jackson Rathbone in the live-action film, Ian Ousley in the live-action series) is a 15-year-old warrior of the Southern Water Tribe, and Katara's older brother. With no bending power of his own, Sokka relies largely on a metallic boomerang, a blunt metal club, a machete, and later a black jian, or sword, created from the metals of a meteorite. Surprisingly in an inhabitant of a mystical world, Sokka is an engineer and something of a jack-of-all-trades, in which respect he is easily able to understand the Fire Nation's advanced technology, and perfects the design of the hot air balloon. In addition, he is both heterodox and resourceful in his endeavors, and a source of comic relief throughout the series. Sokka was in love with the Northern Water Tribe princess Yue at the end of Book One and later shifted his affections to the Kyoshi Warriors' leader Suki in Books Two and Three. In the sequel series, flashbacks reveal Sokka was the first representative of the Southern Water Tribe to sit on the Republic City Council, and possibly its first chairman. He died a few years after Aang, when the next Avatar, Korra, was still a child."]], "1_corpus": "wikipedia"}
+{"tstamp": 1722310798.4596, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "2ff5ab5b630040549b9bacc23cc1ea95", "0_model_name": "GritLM/GritLM-7B", "0_prompt": "do fled pokemon come back?", "0_output": [["do fled pokemon come back?", "Gaming Stackexchange\n\nQ: Do I have to physically return a gym to retrieve a pokemon when the gym is defeated? I'm thinking about leaving my Vaporeon at the gym near where I work. I know that I might be lucky to keep it there one or two days, but darnit, I wanna do it. So if when the gym falls, do I need to physically return to it to get my pokemon back? \n\nA: No you do not! It returns to you automatically. As of Android version 0.35.0/iOS version 1.50, ousted defenders return to you with 0 HP, per the patch notes.\n"]], "0_corpus": "stackexchange", "1_conv_id": "f684351f5e9a4cbd9e9e2e62282e624c", "1_model_name": "jinaai/jina-embeddings-v2-base-en", "1_prompt": "do fled pokemon come back?", "1_output": [["do fled pokemon come back?", "Stackoverflow Stackexchange\n\nQ: How to find out cl.exe's built-in macros Does anyone know how could I find out which are cl.exe's builtin/predefined macros?\nFor example for gcc the following command line will list all the compiler's builtin macros\ngcc -dM -E - </dev/null\n\nEDIT: I'm interested in a way similar to gcc's that is \"ask the actual compiler\".\nThanks\n\nA: /P preprocessor flag will emit the currently active macros based on the project build settings. I am not sure if it is exactly the equivalent of gcc command you have shown. The output is in .I file.\n"]], "1_corpus": "stackexchange"}
+{"tstamp": 1722310831.0005, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "3495497b338a42f68c1ed1ff16f8d3ab", "0_model_name": "BM25", "0_prompt": "a great benchmark for text embeddings", "0_output": [["a great benchmark for text embeddings", "Title: Recent advances in text embedding: A Comprehensive Review of Top-Performing Methods on the MTEB Benchmark\n\nAbstract: Text embedding methods have become increasingly popular in both industrial and academic fields due to their critical role in a variety of natural language processing tasks. The significance of universal text embeddings has been further highlighted with the rise of Large Language Models (LLMs) applications such as Retrieval-Augmented Systems (RAGs). While previous models have attempted to be general-purpose, they often struggle to generalize across tasks and domains. However, recent advancements in training data quantity, quality and diversity; synthetic data generation from LLMs as well as using LLMs as backbones encourage great improvements in pursuing universal text embeddings. In this paper, we provide an overview of the recent advances in universal text embedding models with a focus on the top performing text embeddings on Massive Text Embedding Benchmark (MTEB). Through detailed comparison and analysis, we highlight the key contributions and limitations in this area, and propose potentially inspiring future research directions."]], "0_corpus": "arxiv", "1_conv_id": "732203ab60e34d408f5ea6db08909f9a", "1_model_name": "text-embedding-004", "1_prompt": "a great benchmark for text embeddings", "1_output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "1_corpus": "arxiv"}
+{"tstamp": 1722310835.8058, "task_type": "retrieval", "type": "bothbadvote", "models": ["", ""], "ip": "", "0_conv_id": "51662d2391894809a37610cb3c419e8a", "0_model_name": "intfloat/multilingual-e5-large-instruct", "0_prompt": "what is the difference between field and sigma field?", "0_output": [["what is the difference between field and sigma field?", "Math Stackexchange\n\nQ: $\\sigma$-algebra vs. $\\sigma$-field: is there any difference? The subject says it all: is there any difference between the two concepts of $\\sigma$-algebra and $\\sigma$-field? In probability theory, they seem to be used more or less interchangeably. If there is no difference, is there any historical reason why some people/schools use the term $\\sigma$-algebra, while others use the term $\\sigma-$field?\n\nA: If you want to be completely sure you can see Taylor´s Introduction to Measure and Integration where he states that both concepts refer to the same thing.\n"]], "0_corpus": "stackexchange", "1_conv_id": "c60e866a11304ec09c51a718d9b9bbb8", "1_model_name": "mixedbread-ai/mxbai-embed-large-v1", "1_prompt": "what is the difference between field and sigma field?", "1_output": [["what is the difference between field and sigma field?", "Math Stackexchange\n\nQ: $\\sigma$-algebra vs. $\\sigma$-field: is there any difference? The subject says it all: is there any difference between the two concepts of $\\sigma$-algebra and $\\sigma$-field? In probability theory, they seem to be used more or less interchangeably. If there is no difference, is there any historical reason why some people/schools use the term $\\sigma$-algebra, while others use the term $\\sigma-$field?\n\nA: If you want to be completely sure you can see Taylor´s Introduction to Measure and Integration where he states that both concepts refer to the same thing.\n"]], "1_corpus": "stackexchange"}
+{"tstamp": 1722310863.6196, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "4f330aeb5b2b442d8190b75b12ed1f29", "0_model_name": "nomic-ai/nomic-embed-text-v1.5", "0_prompt": "New search algorithm inspired by AlphaZero for better theorem proving results", "0_output": [["New search algorithm inspired by AlphaZero for better theorem proving results", "Title: On the propagation of semiclassical Wigner functions\n\nAbstract: We establish the difference between the propagation of semiclassical Wigner functions and classical Liouville propagation. First we re-discuss the semiclassical limit for the propagator of Wigner functions, which on its own leads to their classical propagation. Then, via stationary phase evaluation of the full integral evolution equation, using the semiclassical expressions of Wigner functions, we provide the correct geometrical prescription for their semiclassical propagation. This is determined by the classical trajectories of the tips of the chords defined by the initial semiclassical Wigner function and centered on their arguments, in contrast to the Liouville propagation which is determined by the classical trajectories of the arguments themselves."]], "0_corpus": "arxiv", "1_conv_id": "86fc44429afe4954a77719d25bf4ef90", "1_model_name": "GritLM/GritLM-7B", "1_prompt": "New search algorithm inspired by AlphaZero for better theorem proving results", "1_output": [["New search algorithm inspired by AlphaZero for better theorem proving results", "Title: HyperTree Proof Search for Neural Theorem Proving\n\nAbstract: We propose an online training procedure for a transformer-based automated theorem prover. Our approach leverages a new search algorithm, HyperTree Proof Search (HTPS), inspired by the recent success of AlphaZero. Our model learns from previous proof searches through online training, allowing it to generalize to domains far from the training distribution. We report detailed ablations of our pipeline's main components by studying performance on three environments of increasing complexity. In particular, we show that with HTPS alone, a model trained on annotated proofs manages to prove 65.4% of a held-out set of Metamath theorems, significantly outperforming the previous state of the art of 56.5% by GPT-f. Online training on these unproved theorems increases accuracy to 82.6%. With a similar computational budget, we improve the state of the art on the Lean-based miniF2F-curriculum dataset from 31% to 42% proving accuracy."]], "1_corpus": "arxiv"}
+{"tstamp": 1722310892.1304, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "c998ef114db443b0a8ea43dcaa79997c", "0_model_name": "GritLM/GritLM-7B", "0_prompt": "can I put metal plate in a microwave", "0_output": [["can I put metal plate in a microwave", "Microwave oven\n\nMetal objects Contrary to popular assumptions, metal objects can be safely used in a microwave oven, but with some restrictions. Any metal or conductive object placed into the microwave oven acts as an antenna to some degree, resulting in an electric current. This causes the object to act as a heating element. This effect varies with the object's shape and composition, and is sometimes utilized for cooking.\nAny object containing pointed metal can create an electric arc (sparks) when microwaved. This includes cutlery, crumpled aluminium foil (though some foil used in microwave ovens is safe, see below), twist-ties containing metal wire, the metal wire carry-handles in oyster pails, or almost any metal formed into a poorly conductive foil or thin wire, or into a pointed shape. Forks are a good example: the tines of the fork respond to the electric field by producing high concentrations of electric charge at the tips. This has the effect of exceeding the dielectric breakdown of air, about 3 megavolts per meter (3×106 V/m). The air forms a conductive plasma, which is visible as a spark. The plasma and the tines may then form a conductive loop, which may be a more effective antenna, resulting in a longer lived spark. When dielectric breakdown occurs in air, some ozone and nitrogen oxides are formed, both of which are unhealthy in large quantities."]], "0_corpus": "wikipedia", "1_conv_id": "088ae8addacd4fa3a40cb59053e8abe6", "1_model_name": "BAAI/bge-large-en-v1.5", "1_prompt": "can I put metal plate in a microwave", "1_output": [["can I put metal plate in a microwave", "Microwave oven\n\nMetal objects Contrary to popular assumptions, metal objects can be safely used in a microwave oven, but with some restrictions. Any metal or conductive object placed into the microwave oven acts as an antenna to some degree, resulting in an electric current. This causes the object to act as a heating element. This effect varies with the object's shape and composition, and is sometimes utilized for cooking.\nAny object containing pointed metal can create an electric arc (sparks) when microwaved. This includes cutlery, crumpled aluminium foil (though some foil used in microwave ovens is safe, see below), twist-ties containing metal wire, the metal wire carry-handles in oyster pails, or almost any metal formed into a poorly conductive foil or thin wire, or into a pointed shape. Forks are a good example: the tines of the fork respond to the electric field by producing high concentrations of electric charge at the tips. This has the effect of exceeding the dielectric breakdown of air, about 3 megavolts per meter (3×106 V/m). The air forms a conductive plasma, which is visible as a spark. The plasma and the tines may then form a conductive loop, which may be a more effective antenna, resulting in a longer lived spark. When dielectric breakdown occurs in air, some ozone and nitrogen oxides are formed, both of which are unhealthy in large quantities."]], "1_corpus": "wikipedia"}
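The six records added above cover all four vote types (`rightvote`, `leftvote`, `tievote`, `bothbadvote`). How the arena aggregates these into ratings is not part of this commit; a simple win tally, under the natural reading that `leftvote` credits side 0 and `rightvote` credits side 1, might look like this:

```python
import json
from collections import Counter

def win_counts(path):
    """Tally outright wins per model; ties and both-bad votes credit nobody."""
    wins = Counter()
    with open(path, encoding="utf-8") as f:
        for line in f:
            rec = json.loads(line)
            if rec["type"] == "leftvote":
                wins[rec["0_model_name"]] += 1
            elif rec["type"] == "rightvote":
                wins[rec["1_model_name"]] += 1
    return wins
```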
data/retrieval_individual-b9229914-47bc-4da8-a21b-89329fff8207.jsonl
CHANGED
@@ -59,3 +59,17 @@
{"tstamp": 1722310335.4722, "task_type": "retrieval", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1722310335.2095, "finish": 1722310335.4722, "ip": "", "conv_id": "2f7dc574c8b94ae7a397af80c4cf05f5", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": "Learning word representations to improve IR tasks like query expansion and classification", "output": [["Learning word representations to improve IR tasks like query expansion and classification", "Title: Relevance-based Word Embedding\n\nAbstract: Learning a high-dimensional dense representation for vocabulary terms, also known as a word embedding, has recently attracted much attention in natural language processing and information retrieval tasks. The embedding vectors are typically learned based on term proximity in a large corpus. This means that the objective in well-known word embedding algorithms, e.g., word2vec, is to accurately predict adjacent word(s) for a given word or context. However, this objective is not necessarily equivalent to the goal of many information retrieval (IR) tasks. The primary objective in various IR tasks is to capture relevance instead of term proximity, syntactic, or even semantic similarity. This is the motivation for developing unsupervised relevance-based word embedding models that learn word representations based on query-document relevance information. In this paper, we propose two learning models with different objective functions; one learns a relevance distribution over the vocabulary set for each query, and the other classifies each term as belonging to the relevant or non-relevant class for each query. To train our models, we used over six million unique queries and the top ranked documents retrieved in response to each query, which are assumed to be relevant to the query. We extrinsically evaluate our learned word representation models using two IR tasks: query expansion and query classification. Both query expansion experiments on four TREC collections and query classification experiments on the KDD Cup 2005 dataset suggest that the relevance-based word embedding models significantly outperform state-of-the-art proximity-based embedding models, such as word2vec and GloVe."]], "corpus": "arxiv"}
{"tstamp": 1722310364.3913, "task_type": "retrieval", "type": "chat", "model": "voyage-multilingual-2", "gen_params": {}, "start": 1722310363.9154, "finish": 1722310364.3913, "ip": "", "conv_id": "8c01a7990c7d433c8f9c9a38ccfd45b3", "model_name": "voyage-multilingual-2", "prompt": "who starred in the film far from the madding crowd", "output": [["who starred in the film far from the madding crowd", "Far from the Madding Crowd (2015 film)\n\nPlot The film is set in 1870 in Britain. While working on her aunt's farm in Dorset, Bathsheba Everdene (Carey Mulligan) meets neighbouring farmer, Gabriel Oak (Matthias Schoenaerts). He eventually proposes, but the headstrong Bathsheba declines, saying she is too independent. One night, Gabriel's new sheepdog herds his entire flock off a steep cliff. Penniless, Gabriel leaves to search for work. In contrast, Bathsheba inherits an uncle's farm and leaves to run it.\nWhile at a fair trying to find employment, Gabriel sees recruiting officers. A girl there, Fanny Robbin, points out one soldier, Sergeant Frank Troy, her sweetheart. She suggests Gabriel seek employment at a farm in Weatherbury. Upon Gabriel's arrival, several buildings are on fire. He saves the barn from destruction. At dawn the next day, he meets the farm's new mistress, Bathsheba, who hires him as a shepherd.\nWhile in town trading her seed, Bathsheba sees her neighbour William Boldwood (Michael Sheen), a prosperous and mature bachelor. Bathsheba sends Boldwood a Valentine as a joke. He is both offended and intrigued. In the meantime, Fanny goes to the wrong church for her wedding; Troy, believing he's been jilted, is devastated. Boldwood comes to speak to Bathsheba, sees her handling sheep, and is charmed. He invites her to his home, where he proposes marriage. Bathsheba delays giving him a final answer, and Gabriel admonishes her for toying with Boldwood's affections. Stung by the criticism, she fires him. The next day, a crisis arises with the sheep that only Gabriel can manage. She successfully persuades him to return."]], "corpus": "wikipedia"}
{"tstamp": 1722310364.3913, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722310363.9154, "finish": 1722310364.3913, "ip": "", "conv_id": "2dafecf528e44c34a578dd2bad99f101", "model_name": "GritLM/GritLM-7B", "prompt": "who starred in the film far from the madding crowd", "output": [["who starred in the film far from the madding crowd", "Far from the Madding Crowd (2015 film)\n\nFar from the Madding Crowd is a 2015 British romantic drama film directed by Thomas Vinterberg and starring Carey Mulligan, Matthias Schoenaerts, Tom Sturridge, Michael Sheen, and Juno Temple. An adaptation by David Nicholls of the 1874 novel Far from the Madding Crowd by Thomas Hardy, it is the fourth film adaptation of the novel."]], "corpus": "wikipedia"}
+{"tstamp": 1722310685.9008, "task_type": "retrieval", "type": "chat", "model": "text-embedding-004", "gen_params": {}, "start": 1722310684.8371, "finish": 1722310685.9008, "ip": "", "conv_id": "0494368f367f4ce891d1645f49397dce", "model_name": "text-embedding-004", "prompt": "what happened to sokka after the last airbender", "output": [["what happened to sokka after the last airbender", "Sokka\n\nSokka is a fictional character in Nickelodeon's animated television series Avatar: The Last Airbender and its sequel series The Legend of Korra. The character, created by Michael Dante DiMartino and Bryan Konietzko, is voiced by Jack DeSena in the original series and by Chris Hardwick in the sequel series. He is a warrior of the Water Tribe and the son of Chief Hakoda and Kya. In the live-action film adaptation, he is portrayed by Jackson Rathbone, while in the live-action television series adaptation, he is portrayed by Ian Ousley."]], "corpus": "wikipedia"}
+{"tstamp": 1722310685.9008, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722310684.8371, "finish": 1722310685.9008, "ip": "", "conv_id": "2a889447fde54cc5b596b7983ab9b19e", "model_name": "GritLM/GritLM-7B", "prompt": "what happened to sokka after the last airbender", "output": [["what happened to sokka after the last airbender", "List of Avatar: The Last Airbender characters\n\nSokka Sokka () (voiced by Jack DeSena in the original series, Chris Hardwick in the sequel series, portrayed by Jackson Rathbone in the live-action film, Ian Ousley in the live-action series) is a 15-year-old warrior of the Southern Water Tribe, and Katara's older brother. With no bending power of his own, Sokka relies largely on a metallic boomerang, a blunt metal club, a machete, and later a black jian, or sword, created from the metals of a meteorite. Surprisingly in an inhabitant of a mystical world, Sokka is an engineer and something of a jack-of-all-trades, in which respect he is easily able to understand the Fire Nation's advanced technology, and perfects the design of the hot air balloon. In addition, he is both heterodox and resourceful in his endeavors, and a source of comic relief throughout the series. Sokka was in love with the Northern Water Tribe princess Yue at the end of Book One and later shifted his affections to the Kyoshi Warriors' leader Suki in Books Two and Three. In the sequel series, flashbacks reveal Sokka was the first representative of the Southern Water Tribe to sit on the Republic City Council, and possibly its first chairman. He died a few years after Aang, when the next Avatar, Korra, was still a child."]], "corpus": "wikipedia"}
+{"tstamp": 1722310758.6297, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722310758.4281, "finish": 1722310758.6297, "ip": "", "conv_id": "2ff5ab5b630040549b9bacc23cc1ea95", "model_name": "GritLM/GritLM-7B", "prompt": "do fled pokemon come back?", "output": [["do fled pokemon come back?", "Gaming Stackexchange\n\nQ: Do I have to physically return a gym to retrieve a pokemon when the gym is defeated? I'm thinking about leaving my Vaporeon at the gym near where I work. I know that I might be lucky to keep it there one or two days, but darnit, I wanna do it. So if when the gym falls, do I need to physically return to it to get my pokemon back? \n\nA: No you do not! It returns to you automatically. As of Android version 0.35.0/iOS version 1.50, ousted defenders return to you with 0 HP, per the patch notes.\n"]], "corpus": "stackexchange"}
+{"tstamp": 1722310758.6297, "task_type": "retrieval", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1722310758.4281, "finish": 1722310758.6297, "ip": "", "conv_id": "f684351f5e9a4cbd9e9e2e62282e624c", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": "do fled pokemon come back?", "output": [["do fled pokemon come back?", "Stackoverflow Stackexchange\n\nQ: How to find out cl.exe's built-in macros Does anyone know how could I find out which are cl.exe's builtin/predefined macros?\nFor example for gcc the following command line will list all the compiler's builtin macros\ngcc -dM -E - </dev/null\n\nEDIT: I'm interested in a way similar to gcc's that is \"ask the actual compiler\".\nThanks\n\nA: /P preprocessor flag will emit the currently active macros based on the project build settings. I am not sure if it is exactly the equivalent of gcc command you have shown. The output is in .I file.\n"]], "corpus": "stackexchange"}
+{"tstamp": 1722310810.0352, "task_type": "retrieval", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1722310809.838, "finish": 1722310810.0352, "ip": "", "conv_id": "51662d2391894809a37610cb3c419e8a", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": "what is the difference between field and sigma field?", "output": [["what is the difference between field and sigma field?", "Math Stackexchange\n\nQ: $\\sigma$-algebra vs. $\\sigma$-field: is there any difference? The subject says it all: is there any difference between the two concepts of $\\sigma$-algebra and $\\sigma$-field? In probability theory, they seem to be used more or less interchangeably. If there is no difference, is there any historical reason why some people/schools use the term $\\sigma$-algebra, while others use the term $\\sigma-$field?\n\nA: If you want to be completely sure you can see Taylor´s Introduction to Measure and Integration where he states that both concepts refer to the same thing.\n"]], "corpus": "stackexchange"}
+{"tstamp": 1722310810.0352, "task_type": "retrieval", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1722310809.838, "finish": 1722310810.0352, "ip": "", "conv_id": "c60e866a11304ec09c51a718d9b9bbb8", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": "what is the difference between field and sigma field?", "output": [["what is the difference between field and sigma field?", "Math Stackexchange\n\nQ: $\\sigma$-algebra vs. $\\sigma$-field: is there any difference? The subject says it all: is there any difference between the two concepts of $\\sigma$-algebra and $\\sigma$-field? In probability theory, they seem to be used more or less interchangeably. If there is no difference, is there any historical reason why some people/schools use the term $\\sigma$-algebra, while others use the term $\\sigma-$field?\n\nA: If you want to be completely sure you can see Taylor´s Introduction to Measure and Integration where he states that both concepts refer to the same thing.\n"]], "corpus": "stackexchange"}
+{"tstamp": 1722310826.3889, "task_type": "retrieval", "type": "chat", "model": "BM25", "gen_params": {}, "start": 1722310825.6751, "finish": 1722310826.3889, "ip": "", "conv_id": "3495497b338a42f68c1ed1ff16f8d3ab", "model_name": "BM25", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: Recent advances in text embedding: A Comprehensive Review of Top-Performing Methods on the MTEB Benchmark\n\nAbstract: Text embedding methods have become increasingly popular in both industrial and academic fields due to their critical role in a variety of natural language processing tasks. The significance of universal text embeddings has been further highlighted with the rise of Large Language Models (LLMs) applications such as Retrieval-Augmented Systems (RAGs). While previous models have attempted to be general-purpose, they often struggle to generalize across tasks and domains. However, recent advancements in training data quantity, quality and diversity; synthetic data generation from LLMs as well as using LLMs as backbones encourage great improvements in pursuing universal text embeddings. In this paper, we provide an overview of the recent advances in universal text embedding models with a focus on the top performing text embeddings on Massive Text Embedding Benchmark (MTEB). Through detailed comparison and analysis, we highlight the key contributions and limitations in this area, and propose potentially inspiring future research directions."]], "corpus": "arxiv"}
{"tstamp": 1722310826.3889, "task_type": "retrieval", "type": "chat", "model": "text-embedding-004", "gen_params": {}, "start": 1722310825.6751, "finish": 1722310826.3889, "ip": "", "conv_id": "732203ab60e34d408f5ea6db08909f9a", "model_name": "text-embedding-004", "prompt": "a great benchmark for text embeddings", "output": [["a great benchmark for text embeddings", "Title: MTEB: Massive Text Embedding Benchmark\n\nAbstract: Text embeddings are commonly evaluated on a small set of datasets from a single task not covering their possible applications to other tasks. It is unclear whether state-of-the-art embeddings on semantic textual similarity (STS) can be equally well applied to other tasks like clustering or reranking. This makes progress in the field difficult to track, as various models are constantly being proposed without proper evaluation. To solve this problem, we introduce the Massive Text Embedding Benchmark (MTEB). MTEB spans 8 embedding tasks covering a total of 58 datasets and 112 languages. Through the benchmarking of 33 models on MTEB, we establish the most comprehensive benchmark of text embeddings to date. We find that no particular text embedding method dominates across all tasks. This suggests that the field has yet to converge on a universal text embedding method and scale it up sufficiently to provide state-of-the-art results on all embedding tasks. MTEB comes with open-source code and a public leaderboard at https://github.com/embeddings-benchmark/mteb."]], "corpus": "arxiv"}
{"tstamp": 1722310849.7255, "task_type": "retrieval", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1722310847.5377, "finish": 1722310849.7255, "ip": "", "conv_id": "4f330aeb5b2b442d8190b75b12ed1f29", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": "New search algorithm inspired by AlphaZero for better theorem proving results", "output": [["New search algorithm inspired by AlphaZero for better theorem proving results", "Title: On the propagation of semiclassical Wigner functions\n\nAbstract: We establish the difference between the propagation of semiclassical Wigner functions and classical Liouville propagation. First we re-discuss the semiclassical limit for the propagator of Wigner functions, which on its own leads to their classical propagation. Then, via stationary phase evaluation of the full integral evolution equation, using the semiclassical expressions of Wigner functions, we provide the correct geometrical prescription for their semiclassical propagation. This is determined by the classical trajectories of the tips of the chords defined by the initial semiclassical Wigner function and centered on their arguments, in contrast to the Liouville propagation which is determined by the classical trajectories of the arguments themselves."]], "corpus": "arxiv"}
{"tstamp": 1722310849.7255, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722310847.5377, "finish": 1722310849.7255, "ip": "", "conv_id": "86fc44429afe4954a77719d25bf4ef90", "model_name": "GritLM/GritLM-7B", "prompt": "New search algorithm inspired by AlphaZero for better theorem proving results", "output": [["New search algorithm inspired by AlphaZero for better theorem proving results", "Title: HyperTree Proof Search for Neural Theorem Proving\n\nAbstract: We propose an online training procedure for a transformer-based automated theorem prover. Our approach leverages a new search algorithm, HyperTree Proof Search (HTPS), inspired by the recent success of AlphaZero. Our model learns from previous proof searches through online training, allowing it to generalize to domains far from the training distribution. We report detailed ablations of our pipeline's main components by studying performance on three environments of increasing complexity. In particular, we show that with HTPS alone, a model trained on annotated proofs manages to prove 65.4% of a held-out set of Metamath theorems, significantly outperforming the previous state of the art of 56.5% by GPT-f. Online training on these unproved theorems increases accuracy to 82.6%. With a similar computational budget, we improve the state of the art on the Lean-based miniF2F-curriculum dataset from 31% to 42% proving accuracy."]], "corpus": "arxiv"}
{"tstamp": 1722310874.1706, "task_type": "retrieval", "type": "chat", "model": "embed-english-v3.0", "gen_params": {}, "start": 1722310873.894, "finish": 1722310874.1706, "ip": "", "conv_id": "cbdc637378324499be0368c4cf0b44de", "model_name": "embed-english-v3.0", "prompt": "when was the abacus invented in ancient china", "output": [["when was the abacus invented in ancient china", "Abacus\n\nEgypt Greek historian Herodotus mentioned the abacus in Ancient Egypt. He wrote that the Egyptians manipulated the pebbles from right to left, opposite in direction to the Greek left-to-right method. Archaeologists have found ancient disks of various sizes that are thought to have been used as counters. However, wall depictions of this instrument are yet to be discovered.\nPersia\nAt around 600 BC, Persians first began to use the abacus, during the Achaemenid Empire. Under the Parthian, Sassanian, and Iranian empires, scholars concentrated on exchanging knowledge and inventions with the countries around them – India, China, and the Roman Empire- which is how the abacus may have been exported to other countries.\nGreece\nThe earliest archaeological evidence for the use of the Greek abacus dates to the 5th century BC. Demosthenes (384 BC–322 BC) complained that the need to use pebbles for calculations was too difficult. A play by Alexis from the 4th century BC mentions an abacus and pebbles for accounting, and both Diogenes and Polybius use the abacus as a metaphor for human behavior, stating \"that men that sometimes stood for more and sometimes for less\" like the pebbles on an abacus. The Greek abacus was a table of wood or marble, pre-set with small counters in wood or metal for mathematical calculations. This Greek abacus was used in Achaemenid Persia, the Etruscan civilization, Ancient Rome, and the Western Christian world until the French Revolution."]], "corpus": "wikipedia"}
{"tstamp": 1722310874.1706, "task_type": "retrieval", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1722310873.894, "finish": 1722310874.1706, "ip": "", "conv_id": "f78b452ffa04499e97fec3be70a373c3", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": "when was the abacus invented in ancient china", "output": [["when was the abacus invented in ancient china", "Abacus\n\nEach rod typically represents one digit of a multi-digit number laid out using a positional numeral system such as base ten (though some cultures used different numerical bases). Roman and East Asian abacuses use a system resembling bi-quinary coded decimal, with a top deck (containing one or two beads) representing fives and a bottom deck (containing four or five beads) representing ones. Natural numbers are normally used, but some allow simple fractional components (e.g. , , and in Roman abacus), and a decimal point can be imagined for fixed-point arithmetic.\nAny particular abacus design supports multiple methods to perform calculations, including addition, subtraction, multiplication, division, and square and cube roots. The beads are first arranged to represent a number, then are manipulated to perform a mathematical operation with another number, and their final position can be read as the result (or can be used as the starting number for subsequent operations).\nIn the ancient world, abacuses were a practical calculating tool. Although calculators and computers are commonly used today instead of abacuses, abacuses remain in everyday use in some countries. The abacus has an advantage of not requiring a writing implement and paper (needed for algorism) or an electric power source. Merchants, traders, and clerks in some parts of Eastern Europe, Russia, China, and Africa use abacuses. The abacus remains in common use as a scoring system in non-electronic table games. Others may use an abacus due to visual impairment that prevents the use of a calculator. The abacus is still used to teach the fundamentals of mathematics to children in most countries."]], "corpus": "wikipedia"}
{"tstamp": 1722310885.7903, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722310883.6154, "finish": 1722310885.7903, "ip": "", "conv_id": "c998ef114db443b0a8ea43dcaa79997c", "model_name": "GritLM/GritLM-7B", "prompt": "can I put metal plate in a microwave", "output": [["can I put metal plate in a microwave", "Microwave oven\n\nMetal objects Contrary to popular assumptions, metal objects can be safely used in a microwave oven, but with some restrictions. Any metal or conductive object placed into the microwave oven acts as an antenna to some degree, resulting in an electric current. This causes the object to act as a heating element. This effect varies with the object's shape and composition, and is sometimes utilized for cooking.\nAny object containing pointed metal can create an electric arc (sparks) when microwaved. This includes cutlery, crumpled aluminium foil (though some foil used in microwave ovens is safe, see below), twist-ties containing metal wire, the metal wire carry-handles in oyster pails, or almost any metal formed into a poorly conductive foil or thin wire, or into a pointed shape. Forks are a good example: the tines of the fork respond to the electric field by producing high concentrations of electric charge at the tips. This has the effect of exceeding the dielectric breakdown of air, about 3 megavolts per meter (3×106 V/m). The air forms a conductive plasma, which is visible as a spark. The plasma and the tines may then form a conductive loop, which may be a more effective antenna, resulting in a longer lived spark. When dielectric breakdown occurs in air, some ozone and nitrogen oxides are formed, both of which are unhealthy in large quantities."]], "corpus": "wikipedia"}
{"tstamp": 1722310885.7903, "task_type": "retrieval", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722310883.6154, "finish": 1722310885.7903, "ip": "", "conv_id": "088ae8addacd4fa3a40cb59053e8abe6", "model_name": "BAAI/bge-large-en-v1.5", "prompt": "can I put metal plate in a microwave", "output": [["can I put metal plate in a microwave", "Microwave oven\n\nMetal objects Contrary to popular assumptions, metal objects can be safely used in a microwave oven, but with some restrictions. Any metal or conductive object placed into the microwave oven acts as an antenna to some degree, resulting in an electric current. This causes the object to act as a heating element. This effect varies with the object's shape and composition, and is sometimes utilized for cooking.\nAny object containing pointed metal can create an electric arc (sparks) when microwaved. This includes cutlery, crumpled aluminium foil (though some foil used in microwave ovens is safe, see below), twist-ties containing metal wire, the metal wire carry-handles in oyster pails, or almost any metal formed into a poorly conductive foil or thin wire, or into a pointed shape. Forks are a good example: the tines of the fork respond to the electric field by producing high concentrations of electric charge at the tips. This has the effect of exceeding the dielectric breakdown of air, about 3 megavolts per meter (3×106 V/m). The air forms a conductive plasma, which is visible as a spark. The plasma and the tines may then form a conductive loop, which may be a more effective antenna, resulting in a longer lived spark. When dielectric breakdown occurs in air, some ozone and nitrogen oxides are formed, both of which are unhealthy in large quantities."]], "corpus": "wikipedia"}
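Each line above is one JSONL record from the retrieval logs: a timestamp (`tstamp`), the embedding model (`model_name`), the user query (`prompt`), the retrieved result in `output` as `[query, passage]` pairs, and the source `corpus`. A minimal sketch for loading and inspecting such a file follows; the file path is a hypothetical placeholder, and the field names are taken directly from the records above.

```python
import json

# Hypothetical path; substitute the actual JSONL log file from this commit.
path = "data/retrieval_individual-b9229914-47bc-4da8-a21b-89329fff8207.jsonl"

with open(path, encoding="utf-8") as f:
    for line in f:
        rec = json.loads(line)
        # "output" holds [query, passage] pairs; it may be empty for some
        # task types (e.g. clustering records store "" instead of a list).
        hits = rec["output"] if isinstance(rec["output"], list) else []
        print(
            f"{rec['tstamp']:.0f}  {rec['model_name']:<45} "
            f"{rec.get('corpus', '-'):<12} {str(rec['prompt'])[:60]!r} "
            f"({len(hits)} hit(s))"
        )
```

Run against this section's records, each printed line would pair one model with one query and the corpus it searched, which is enough to eyeball side-by-side battles such as the two "sigma field" or "abacus" entries above.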