Muennighoff
committed on
Scheduled Commit
- data/clustering_battle-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl +1 -0
- data/clustering_individual-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl +6 -0
- data/retrieval_battle-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl +3 -0
- data/retrieval_individual-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl +8 -0
- data/sts_battle-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl +3 -0
- data/sts_individual-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl +6 -0
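All six files use the same JSON Lines layout: one record per line, each carrying a timestamp (tstamp), a task_type, a vote or chat type, the model name(s), and the prompt/output payload. As a rough sketch only (it assumes the repository has been cloned locally and uses the clustering file above as an example), the records can be loaded with standard-library Python:

```python
import json

# Example path; substitute any of the JSONL files listed above.
path = "data/clustering_individual-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl"

with open(path, encoding="utf-8") as f:
    records = [json.loads(line) for line in f if line.strip()]

# Individual records carry a single "model_name"; battle records carry a "models" pair.
for rec in records:
    print(rec["tstamp"], rec["task_type"], rec.get("model_name", rec.get("models")))
```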
data/clustering_battle-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl
ADDED
@@ -0,0 +1 @@
{"tstamp": 1724418562.3504, "task_type": "clustering", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "f04068d0b2c9420ba17da62b5dab336a", "0_model_name": "intfloat/e5-mistral-7b-instruct", "0_prompt": ["A", "B", "fear", "joy", "disgust", "anger", "surprise", "happiness", "cumulus", "altostratus", "cirrus"], "0_ncluster": 3, "0_output": "", "0_ndim": "2D (press for 3D)", "0_dim_method": "PCA", "0_clustering_method": "KMeans", "1_conv_id": "b8d23cc24afa4ee0a5da8db1a0aee1b2", "1_model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "1_prompt": ["A", "B", "fear", "joy", "disgust", "anger", "surprise", "happiness", "cumulus", "altostratus", "cirrus"], "1_ncluster": 3, "1_output": "", "1_ndim": "2D (press for 3D)", "1_dim_method": "PCA", "1_clustering_method": "KMeans"}
data/clustering_individual-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl
CHANGED
@@ -2,3 +2,9 @@
{"tstamp": 1724363979.0665, "task_type": "clustering", "type": "chat", "model": "text-embedding-3-large", "gen_params": {}, "start": 1724363967.0838, "finish": 1724363979.0665, "ip": "", "conv_id": "c367f74435df44c1aea92ccf2a8d9d19", "model_name": "text-embedding-3-large", "prompt": ["biology", "history", "physics", "chemistry", "literature", "geography", "mathematics", "comedy", "documentary", "black", "redhead", "convex", "plane", "parabolic", "concave", "C++", "Python", "JavaScript"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1724364114.0047, "task_type": "clustering", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1724364112.8394, "finish": 1724364114.0047, "ip": "", "conv_id": "a625144516ba49ea800c9f27d4b04b34", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": ["Pikachu", "Darth Vader", "Yoda", "Squirtle", "Gandalf", "Legolas", "Mickey Mouse", "Donald Duck", "Charizard"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1724364114.0047, "task_type": "clustering", "type": "chat", "model": "text-embedding-3-large", "gen_params": {}, "start": 1724364112.8394, "finish": 1724364114.0047, "ip": "", "conv_id": "938e2bf18b594e898176adac7b1515fb", "model_name": "text-embedding-3-large", "prompt": ["Pikachu", "Darth Vader", "Yoda", "Squirtle", "Gandalf", "Legolas", "Mickey Mouse", "Donald Duck", "Charizard"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1724418528.926, "task_type": "clustering", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1724418528.8205, "finish": 1724418528.926, "ip": "", "conv_id": "f923696b60234955800e875379579083", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": ["semicolon", "colon", "exclamation point", "yacht", "motorboat", "catamaran", "sailboat", "canoe", "jazz", "rock", "classical", "hip-hop", "electronic", "onomatopoeia", "personification"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1724418528.926, "task_type": "clustering", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1724418528.8205, "finish": 1724418528.926, "ip": "", "conv_id": "1bfb379de6794a33935323861615af8e", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": ["semicolon", "colon", "exclamation point", "yacht", "motorboat", "catamaran", "sailboat", "canoe", "jazz", "rock", "classical", "hip-hop", "electronic", "onomatopoeia", "personification"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1724418541.9593, "task_type": "clustering", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1724418541.8465, "finish": 1724418541.9593, "ip": "", "conv_id": "f04068d0b2c9420ba17da62b5dab336a", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": ["A", "B", "fear", "joy", "disgust", "anger", "surprise", "happiness", "cumulus", "altostratus", "cirrus"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1724418541.9593, "task_type": "clustering", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1724418541.8465, "finish": 1724418541.9593, "ip": "", "conv_id": "b8d23cc24afa4ee0a5da8db1a0aee1b2", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": ["A", "B", "fear", "joy", "disgust", "anger", "surprise", "happiness", "cumulus", "altostratus", "cirrus"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1724418548.0231, "task_type": "clustering", "type": "chat", "model": "intfloat/e5-mistral-7b-instruct", "gen_params": {}, "start": 1724418547.9169, "finish": 1724418548.0231, "ip": "", "conv_id": "f04068d0b2c9420ba17da62b5dab336a", "model_name": "intfloat/e5-mistral-7b-instruct", "prompt": ["A", "B", "fear", "joy", "disgust", "anger", "surprise", "happiness", "cumulus", "altostratus", "cirrus"], "ncluster": 3, "output": "", "ndim": "2D (press for 3D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1724418548.0231, "task_type": "clustering", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1724418547.9169, "finish": 1724418548.0231, "ip": "", "conv_id": "b8d23cc24afa4ee0a5da8db1a0aee1b2", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": ["A", "B", "fear", "joy", "disgust", "anger", "surprise", "happiness", "cumulus", "altostratus", "cirrus"], "ncluster": 3, "output": "", "ndim": "2D (press for 3D)", "dim_method": "PCA", "clustering_method": "KMeans"}
data/retrieval_battle-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl
CHANGED
@@ -24,3 +24,6 @@
{"tstamp": 1724405996.4116, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "ab0ecadc32b1471593b1d35eeb66f7a1", "0_model_name": "GritLM/GritLM-7B", "0_prompt": "who is the first Chinese to step into the Olympic", "0_output": [["who is the first Chinese to step into the Olympic", "Dalian\n\nQi Faren (), aerospace engineer and chief designer of the Chinese spacecraft Shenzhou.\nZhao Xiaozhe (), vice admiral of the PLA\nLiu Yanan (), retired middle blocker of the China women's national volleyball team.\nYang Hao (), retired outside spiker of the China women's national volleyball team.\nHui Ruoqi (), retired outside spiker and former captain of the China women's national volleyball team.\nLi Yongbo (), retired badminton player and former head coach of the China National Badminton Team.\nLiu Changchun (), sprinter, first athlete to represent China in competition at the Olympic Games.\nQu Yunxia (), middle-distance athlete.\nZhang Wenxiu (), hammer thrower.\nZhang Enhua (), footballer.\nSun Jihai (), footballer.\nBi Fujian (), director, host and professor.\n(), actor.\nYu Nan (), actress.\nLou Yixiao (), actress.\nDong Jie (), actress.\nLin Peng (), actress.\nYang Hongji (), baritone singer and professor.\nSun Nan (), singer.\nKelly Yu (), singer.\nXue Jiye (), painter and sculptor.\nToshiko Fujita (), Japanese voice actress.\nSong Weilong (), actor and model.\nYang Zhuo (), professional Wushu fighter and kickboxer.\nYu Xiaonan (), Chinese Canadian ballet dancer.\nSun Yu (), badminton player.\nKeiichi Inamine (), Japanese politician\nHarry Triguboff, Jewish Australian businessman.\nFeng Enhe (), actor"]], "0_corpus": "wikipedia", "1_conv_id": "ca6f99be2aa84f4987d8586ed1bcb19b", "1_model_name": "BAAI/bge-large-en-v1.5", "1_prompt": "who is the first Chinese to step into the Olympic", "1_output": [["who is the first Chinese to step into the Olympic", "2008 Summer Olympics opening ceremony\n\nUnlike in previous years, North and South Korea did not send a unified team; their athletes marched in separately as Republic of Korea (South Korea, ) and the Democratic People's Republic of Korea (North Korea, ). Taiwan marched under the name \"Chinese Taipei\" () as per a 1989 agreement and in the Olympics since then – and the Chinese media has followed suit, referring to Taiwan as Zhonghua Taibei instead of the previously used and controversial Zhongguo Taibei (, literally \"Taipei, China\").\nThe Chinese contingent, which was last, was led by Yao Ming and Lin Hao, the 9-year-old primary school student who had rescued two schoolmates during the 2008 Sichuan earthquake.\nOpening Addresses\nLiu Qi, the President of the Beijing Organizing Committee for the Olympic Games, gave a speech in Mandarin welcoming the athletes. His Excellency Count Rogge, the President of the International Olympic Committee, followed with a speech in English, praising the Chinese for their warm reception and effort. The Count urged the athletes to \"have fun\" and to reject doping and performance enhancement drugs. This reminder was reiterated in French. Afterward, Hu Jintao, the paramount leader of China, formally announced the opening of the 2008 Summer Olympics by speaking in Mandarin:\nThe Olympic Flag\nAfterward, the Olympic flag was carried in by eight former athletes from China. 
They were:\nZhang Xielin (table tennis)\nPan Duo (Everest mountaineer)\nZheng Fengrong (athletics)\nYang Yang (A) (short-track speed skating)\nYang Ling (shooting)\n(swimming)\nXiong Ni (diving)"]], "1_corpus": "wikipedia"}
{"tstamp": 1724412311.7493, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "165d3974b767498a98c1775c69d9b193", "0_model_name": "Salesforce/SFR-Embedding-2_R", "0_prompt": "the type of display that google glass presents to its users is called what", "0_output": [["the type of display that google glass presents to its users is called what", "Head-up display\n\nA head-up display, or heads-up display, also known as a HUD () or head-up guidance system (HGS), is any transparent display that presents data without requiring users to look away from their usual viewpoints. The origin of the name stems from a pilot being able to view information with the head positioned \"up\" and looking forward, instead of angled down looking at lower instruments. A HUD also has the advantage that the pilot's eyes do not need to refocus to view the outside after looking at the optically nearer instruments.\nAlthough they were initially developed for military aviation, HUDs are now used in commercial aircraft, automobiles, and other (mostly professional) applications.\nHead-up displays were a precursor technology to augmented reality (AR), incorporating a subset of the features needed for the full AR experience, but lacking the necessary registration and tracking between the virtual content and the user's real-world environment.\nOverview\nA typical HUD contains three primary components: a projector unit, a combiner, and a video generation computer."]], "0_corpus": "wikipedia", "1_conv_id": "af893cb1d2de4c5fb6d7e78348ed5939", "1_model_name": "nomic-ai/nomic-embed-text-v1.5", "1_prompt": "the type of display that google glass presents to its users is called what", "1_output": [["the type of display that google glass presents to its users is called what", "Google Glass\n\nGoogle Glass, or simply Glass, is a brand of smart glasses developed and sold by Google. It was developed by X (previously Google X), with the mission of producing a ubiquitous computer. Google Glass displays information to the wearer using a head-up display. Wearers communicate with the Internet via natural language voice commands.\nGoogle started selling a prototype of Google Glass to qualified \"Glass Explorers\" in the US on April 15, 2013, for a limited period for $1,500, before it became available to the public on May 15, 2014. It has an integrated 5 megapixel still/720p video camera. The headset received a great deal of criticism amid concerns that its use could violate existing privacy laws.\nOn January 15, 2015, Google announced that it would stop producing the Google Glass prototype. The prototype was succeeded by two Enterprise Editions, whose sales were suspended on March 15, 2023."]], "1_corpus": "wikipedia"}
{"tstamp": 1724412360.1887, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "bde794d8cbab4423899d251156ea6986", "0_model_name": "BAAI/bge-large-en-v1.5", "0_prompt": "have gave or have given?", "0_output": [["have gave or have given?", "English Stackexchange\n\nQ: American native speaker and perfect tenses Yesterday I sent my friend a useful gift in online game. Today I can see he has not accepted it yet. Which sentence is grammatically correct and most natural in American English?\n\n \n*\n \n*\"You didn't use the Gift I gave you!\"\n \n*\"You didn't use the Gift I had given you!\" \n \n*\"You haven't used the Gift I gave you!\"\n \n*\"You haven't used the Gift I had given you!\"\n \n\nSomehow option 1 seems most natural, and yet is the least consistent with what I remember from English classes. Option 4 should be grammatically correct, but 2 perfect tenses in one sentence seem a bit odd.\n\nA: \nYou didn't use the gift I gave you.\n\ncould imply that you thought the other party was supposed to use the gift at a specific point of time (or when a certain event occurred), and they have lost the opportunity. \n\nYou haven't used the gift I gave you.\n\nimplies that so far the other party has not used the gift, but there's a chance they still will.\n"]], "0_corpus": "stackexchange", "1_conv_id": "f4062dea5bef45fb8097798d90efaddb", "1_model_name": "mixedbread-ai/mxbai-embed-large-v1", "1_prompt": "have gave or have given?", "1_output": [["have gave or have given?", "English Stackexchange\n\nQ: American native speaker and perfect tenses Yesterday I sent my friend a useful gift in online game. Today I can see he has not accepted it yet. Which sentence is grammatically correct and most natural in American English?\n\n \n*\n \n*\"You didn't use the Gift I gave you!\"\n \n*\"You didn't use the Gift I had given you!\" \n \n*\"You haven't used the Gift I gave you!\"\n \n*\"You haven't used the Gift I had given you!\"\n \n\nSomehow option 1 seems most natural, and yet is the least consistent with what I remember from English classes. Option 4 should be grammatically correct, but 2 perfect tenses in one sentence seem a bit odd.\n\nA: \nYou didn't use the gift I gave you.\n\ncould imply that you thought the other party was supposed to use the gift at a specific point of time (or when a certain event occurred), and they have lost the opportunity. \n\nYou haven't used the gift I gave you.\n\nimplies that so far the other party has not used the gift, but there's a chance they still will.\n"]], "1_corpus": "stackexchange"}
{"tstamp": 1724418725.0893, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "fa2e7565fba1443bad2dbfaf57c5d9fe", "0_model_name": "GritLM/GritLM-7B", "0_prompt": "do you cut cards to the left or right", "0_output": [["do you cut cards to the left or right", "Cut (cards)\n\nProcedure A common procedure is that after the cards have been shuffled, the dealer sets the cards face-down on the table near the player designated to make the cut. This is usually the player to the dealer's right in clockwise games and the player to his left in anticlockwise games. The cutter initiates a cut of the deck by taking a contiguous range of cards off the top of the deck and placing it face-down on the table farther from the dealer; the dealer completes the cut by taking the original bottom portion of the deck and placing it on top of the just-moved cards. Another common procedure is that the person making the cut, places the top part of the cut closer to the dealer, as the deck originally was placed nearer to the cutter. Once the cut is complete, the dealer picks up the deck, straightens or \"squares\" it, and deals the cards.\nRules of procedure or etiquette may vary concerning who makes the cut, the minimum or maximum number of cards which may be cut off the top, whether the dealer or the cutter restacks the cards, whether a cut card is employed, and whether a cut is mandatory.\nIn some games the cutter may choose not to cut; typically by knocking, i.e. tapping the top of the pack or the table with the fist. In some games, the dealer may then deal the entire hand to each player, rather than individually or in packets. In other games, the cutter may then specify how the dealer is to distribute the cards.\nEtiquette\nDuring informal card games, the dealer is typically not required to offer the cut, and even if offered, the designated player can decline the request. On the other hand, any player may specifically request to cut the cards before they are dealt. If a cut is requested by a player, it must be granted by the dealer."]], "0_corpus": "wikipedia", "1_conv_id": "ef1478cb7fc74ca694c399c651d30669", "1_model_name": "jinaai/jina-embeddings-v2-base-en", "1_prompt": "do you cut cards to the left or right", "1_output": [["do you cut cards to the left or right", "Cut (cards)\n\nIn formal player dealt settings, such as in a casino or during a tournament, an offer to cut the deck is mandatory and the designated player must perform the cut, generally by inserting a cut card (a plastic card about the size of a playing card, usually solid-colored) into the deck; the dealer then makes the actual cut at that point in the deck. When the dealer is not a player (i.e. a casino employee), the cut is mandatory and is usually performed by the dealer. In this instance, the deck is cut onto the aforementioned cut card, and the cut completed; this prevents players from seeing the bottom card of the deck.\nThe contiguous section may also be taken from the middle of the deck. This is called \"Scarne's cut\", though in some settings this is considered poor etiquette or against the rules. A cut involving a very small number of cards, such as taking only the top card, taking some cards from the bottom or taking every card bar the bottom one as a cut, is often acceptable according to rules. Other rules may specify that at least three cards must be taken or left in making a cut. Sometimes up to three cuts are allowed. 
A sensible minimum is about one-fifth of the deck.\nA cut should always be completed with one hand to limit possibility of a false cut."]], "1_corpus": "wikipedia"}
{"tstamp": 1724418771.0645, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "fe85ba8551fc44508f67e053129c49ea", "0_model_name": "nomic-ai/nomic-embed-text-v1.5", "0_prompt": "A paper proposing a new Transformer-based framework for general multi-label image classification.", "0_output": [["A paper proposing a new Transformer-based framework for general multi-label image classification.", "Title: General Multi-label Image Classification with Transformers\n\nAbstract: Multi-label image classification is the task of predicting a set of labels corresponding to objects, attributes or other entities present in an image. In this work we propose the Classification Transformer (C-Tran), a general framework for multi-label image classification that leverages Transformers to exploit the complex dependencies among visual features and labels. Our approach consists of a Transformer encoder trained to predict a set of target labels given an input set of masked labels, and visual features from a convolutional neural network. A key ingredient of our method is a label mask training objective that uses a ternary encoding scheme to represent the state of the labels as positive, negative, or unknown during training. Our model shows state-of-the-art performance on challenging datasets such as COCO and Visual Genome. Moreover, because our model explicitly represents the uncertainty of labels during training, it is more general by allowing us to produce improved results for images with partial or extra label annotations during inference. We demonstrate this additional capability in the COCO, Visual Genome, News500, and CUB image datasets."]], "0_corpus": "arxiv", "1_conv_id": "600e33ca6d4d4163863022d8b42eb699", "1_model_name": "BM25", "1_prompt": "A paper proposing a new Transformer-based framework for general multi-label image classification.", "1_output": [["A paper proposing a new Transformer-based framework for general multi-label image classification.", "Title: Graph Attention Transformer Network for Multi-Label Image Classification\n\nAbstract: Multi-label classification aims to recognize multiple objects or attributes from images. However, it is challenging to learn from proper label graphs to effectively characterize such inter-label correlations or dependencies. Current methods often use the co-occurrence probability of labels based on the training set as the adjacency matrix to model this correlation, which is greatly limited by the dataset and affects the model's generalization ability. In this paper, we propose a Graph Attention Transformer Network (GATN), a general framework for multi-label image classification that can effectively mine complex inter-label relationships. First, we use the cosine similarity based on the label word embedding as the initial correlation matrix, which can represent rich semantic information. Subsequently, we design the graph attention transformer layer to transfer this adjacency matrix to adapt to the current domain. Our extensive experiments have demonstrated that our proposed methods can achieve state-of-the-art performance on three datasets."]], "1_corpus": "arxiv"}
{"tstamp": 1724418790.7023, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "b82b2105594146f0a7604c28ba7c6ba5", "0_model_name": "sentence-transformers/all-MiniLM-L6-v2", "0_prompt": "what is the difference between street art and outdoor murals?", "0_output": [["what is the difference between street art and outdoor murals?", "Stackoverflow Stackexchange\n\nQ: Django - Keeping the original method's work and add new custom validation This is supposed to be a Django-specific, but I guess it's Python anyway.\nBasically, I don't want to override the work of the original method in the class I am inheriting (could be a Model class), but I'd like to add additional validation. Is this possible? Any hint? \nclass MyUserAdminForm(forms.ModelForm):\n class Meta:\n model = User\n\n def clean(self):\n // do some additional work even though it's cleaned by parent's clean method\n\n\nA: Call the super classes clean method:\ndef clean(self): \n super(MyUserAdminForm, self).clean()\n # more cleaning\n\nThis is a common python thing to do when you subclass something and redefine functionaly but want to make sure you keep the super class functionality. Extremely common when you do an init method, as you always need to ensure the super class constructor gets called to set up the instance. \n"]], "0_corpus": "stackexchange", "1_conv_id": "ae2b7a1870744e42acf578da232b460d", "1_model_name": "intfloat/multilingual-e5-large-instruct", "1_prompt": "what is the difference between street art and outdoor murals?", "1_output": [["what is the difference between street art and outdoor murals?", "English Stackexchange\n\nQ: \"Paintings on walls and ceilings\" and \"painting of portraits, landscapes\" I am creating a portfolio of painter's works and I need to categorize them.\nThere will be two global categories:\n\n\n*\n\n*Paintings on canvas\n\n*Painting on walls and ceilings\n\n\nThe paintings on canvas divide into \"Portraits\", \"Landscapes\", and so on.\nHow should I call paintings of walls and ceilings in English?\nMaybe there is some precise word? I haven't found it, using translators from Russian.\n\nA: They are called murals.\n\nA mural is any piece of artwork painted or applied directly on a wall, ceiling or other large permanent surface. A particularly distinguishing characteristic of mural painting is that the architectural elements of the given space are harmoniously incorporated into the picture.\n\n"]], "1_corpus": "stackexchange"}
data/retrieval_individual-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl
CHANGED
@@ -82,3 +82,11 @@
{"tstamp": 1724412280.8624, "task_type": "retrieval", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1724412278.534, "finish": 1724412280.8624, "ip": "", "conv_id": "af893cb1d2de4c5fb6d7e78348ed5939", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": "the type of display that google glass presents to its users is called what", "output": [["the type of display that google glass presents to its users is called what", "Google Glass\n\nGoogle Glass, or simply Glass, is a brand of smart glasses developed and sold by Google. It was developed by X (previously Google X), with the mission of producing a ubiquitous computer. Google Glass displays information to the wearer using a head-up display. Wearers communicate with the Internet via natural language voice commands.\nGoogle started selling a prototype of Google Glass to qualified \"Glass Explorers\" in the US on April 15, 2013, for a limited period for $1,500, before it became available to the public on May 15, 2014. It has an integrated 5 megapixel still/720p video camera. The headset received a great deal of criticism amid concerns that its use could violate existing privacy laws.\nOn January 15, 2015, Google announced that it would stop producing the Google Glass prototype. The prototype was succeeded by two Enterprise Editions, whose sales were suspended on March 15, 2023."]], "corpus": "wikipedia"}
{"tstamp": 1724412344.1326, "task_type": "retrieval", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1724412343.9547, "finish": 1724412344.1326, "ip": "", "conv_id": "bde794d8cbab4423899d251156ea6986", "model_name": "BAAI/bge-large-en-v1.5", "prompt": "have gave or have given?", "output": [["have gave or have given?", "English Stackexchange\n\nQ: American native speaker and perfect tenses Yesterday I sent my friend a useful gift in online game. Today I can see he has not accepted it yet. Which sentence is grammatically correct and most natural in American English?\n\n \n*\n \n*\"You didn't use the Gift I gave you!\"\n \n*\"You didn't use the Gift I had given you!\" \n \n*\"You haven't used the Gift I gave you!\"\n \n*\"You haven't used the Gift I had given you!\"\n \n\nSomehow option 1 seems most natural, and yet is the least consistent with what I remember from English classes. Option 4 should be grammatically correct, but 2 perfect tenses in one sentence seem a bit odd.\n\nA: \nYou didn't use the gift I gave you.\n\ncould imply that you thought the other party was supposed to use the gift at a specific point of time (or when a certain event occurred), and they have lost the opportunity. \n\nYou haven't used the gift I gave you.\n\nimplies that so far the other party has not used the gift, but there's a chance they still will.\n"]], "corpus": "stackexchange"}
{"tstamp": 1724412344.1326, "task_type": "retrieval", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1724412343.9547, "finish": 1724412344.1326, "ip": "", "conv_id": "f4062dea5bef45fb8097798d90efaddb", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": "have gave or have given?", "output": [["have gave or have given?", "English Stackexchange\n\nQ: American native speaker and perfect tenses Yesterday I sent my friend a useful gift in online game. Today I can see he has not accepted it yet. Which sentence is grammatically correct and most natural in American English?\n\n \n*\n \n*\"You didn't use the Gift I gave you!\"\n \n*\"You didn't use the Gift I had given you!\" \n \n*\"You haven't used the Gift I gave you!\"\n \n*\"You haven't used the Gift I had given you!\"\n \n\nSomehow option 1 seems most natural, and yet is the least consistent with what I remember from English classes. Option 4 should be grammatically correct, but 2 perfect tenses in one sentence seem a bit odd.\n\nA: \nYou didn't use the gift I gave you.\n\ncould imply that you thought the other party was supposed to use the gift at a specific point of time (or when a certain event occurred), and they have lost the opportunity. \n\nYou haven't used the gift I gave you.\n\nimplies that so far the other party has not used the gift, but there's a chance they still will.\n"]], "corpus": "stackexchange"}
{"tstamp": 1724418706.8955, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1724418704.6604, "finish": 1724418706.8955, "ip": "", "conv_id": "fa2e7565fba1443bad2dbfaf57c5d9fe", "model_name": "GritLM/GritLM-7B", "prompt": "do you cut cards to the left or right", "output": [["do you cut cards to the left or right", "Cut (cards)\n\nProcedure A common procedure is that after the cards have been shuffled, the dealer sets the cards face-down on the table near the player designated to make the cut. This is usually the player to the dealer's right in clockwise games and the player to his left in anticlockwise games. The cutter initiates a cut of the deck by taking a contiguous range of cards off the top of the deck and placing it face-down on the table farther from the dealer; the dealer completes the cut by taking the original bottom portion of the deck and placing it on top of the just-moved cards. Another common procedure is that the person making the cut, places the top part of the cut closer to the dealer, as the deck originally was placed nearer to the cutter. Once the cut is complete, the dealer picks up the deck, straightens or \"squares\" it, and deals the cards.\nRules of procedure or etiquette may vary concerning who makes the cut, the minimum or maximum number of cards which may be cut off the top, whether the dealer or the cutter restacks the cards, whether a cut card is employed, and whether a cut is mandatory.\nIn some games the cutter may choose not to cut; typically by knocking, i.e. tapping the top of the pack or the table with the fist. In some games, the dealer may then deal the entire hand to each player, rather than individually or in packets. In other games, the cutter may then specify how the dealer is to distribute the cards.\nEtiquette\nDuring informal card games, the dealer is typically not required to offer the cut, and even if offered, the designated player can decline the request. On the other hand, any player may specifically request to cut the cards before they are dealt. If a cut is requested by a player, it must be granted by the dealer."]], "corpus": "wikipedia"}
{"tstamp": 1724418706.8955, "task_type": "retrieval", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1724418704.6604, "finish": 1724418706.8955, "ip": "", "conv_id": "ef1478cb7fc74ca694c399c651d30669", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": "do you cut cards to the left or right", "output": [["do you cut cards to the left or right", "Cut (cards)\n\nIn formal player dealt settings, such as in a casino or during a tournament, an offer to cut the deck is mandatory and the designated player must perform the cut, generally by inserting a cut card (a plastic card about the size of a playing card, usually solid-colored) into the deck; the dealer then makes the actual cut at that point in the deck. When the dealer is not a player (i.e. a casino employee), the cut is mandatory and is usually performed by the dealer. In this instance, the deck is cut onto the aforementioned cut card, and the cut completed; this prevents players from seeing the bottom card of the deck.\nThe contiguous section may also be taken from the middle of the deck. This is called \"Scarne's cut\", though in some settings this is considered poor etiquette or against the rules. A cut involving a very small number of cards, such as taking only the top card, taking some cards from the bottom or taking every card bar the bottom one as a cut, is often acceptable according to rules. Other rules may specify that at least three cards must be taken or left in making a cut. Sometimes up to three cuts are allowed. A sensible minimum is about one-fifth of the deck.\nA cut should always be completed with one hand to limit possibility of a false cut."]], "corpus": "wikipedia"}
{"tstamp": 1724418749.0703, "task_type": "retrieval", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1724418747.1526, "finish": 1724418749.0703, "ip": "", "conv_id": "fe85ba8551fc44508f67e053129c49ea", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": "A paper proposing a new Transformer-based framework for general multi-label image classification.", "output": [["A paper proposing a new Transformer-based framework for general multi-label image classification.", "Title: General Multi-label Image Classification with Transformers\n\nAbstract: Multi-label image classification is the task of predicting a set of labels corresponding to objects, attributes or other entities present in an image. In this work we propose the Classification Transformer (C-Tran), a general framework for multi-label image classification that leverages Transformers to exploit the complex dependencies among visual features and labels. Our approach consists of a Transformer encoder trained to predict a set of target labels given an input set of masked labels, and visual features from a convolutional neural network. A key ingredient of our method is a label mask training objective that uses a ternary encoding scheme to represent the state of the labels as positive, negative, or unknown during training. Our model shows state-of-the-art performance on challenging datasets such as COCO and Visual Genome. Moreover, because our model explicitly represents the uncertainty of labels during training, it is more general by allowing us to produce improved results for images with partial or extra label annotations during inference. We demonstrate this additional capability in the COCO, Visual Genome, News500, and CUB image datasets."]], "corpus": "arxiv"}
{"tstamp": 1724418749.0703, "task_type": "retrieval", "type": "chat", "model": "BM25", "gen_params": {}, "start": 1724418747.1526, "finish": 1724418749.0703, "ip": "", "conv_id": "600e33ca6d4d4163863022d8b42eb699", "model_name": "BM25", "prompt": "A paper proposing a new Transformer-based framework for general multi-label image classification.", "output": [["A paper proposing a new Transformer-based framework for general multi-label image classification.", "Title: Graph Attention Transformer Network for Multi-Label Image Classification\n\nAbstract: Multi-label classification aims to recognize multiple objects or attributes from images. However, it is challenging to learn from proper label graphs to effectively characterize such inter-label correlations or dependencies. Current methods often use the co-occurrence probability of labels based on the training set as the adjacency matrix to model this correlation, which is greatly limited by the dataset and affects the model's generalization ability. In this paper, we propose a Graph Attention Transformer Network (GATN), a general framework for multi-label image classification that can effectively mine complex inter-label relationships. First, we use the cosine similarity based on the label word embedding as the initial correlation matrix, which can represent rich semantic information. Subsequently, we design the graph attention transformer layer to transfer this adjacency matrix to adapt to the current domain. Our extensive experiments have demonstrated that our proposed methods can achieve state-of-the-art performance on three datasets."]], "corpus": "arxiv"}
{"tstamp": 1724418780.1003, "task_type": "retrieval", "type": "chat", "model": "sentence-transformers/all-MiniLM-L6-v2", "gen_params": {}, "start": 1724418779.8779, "finish": 1724418780.1003, "ip": "", "conv_id": "b82b2105594146f0a7604c28ba7c6ba5", "model_name": "sentence-transformers/all-MiniLM-L6-v2", "prompt": "what is the difference between street art and outdoor murals?", "output": [["what is the difference between street art and outdoor murals?", "Stackoverflow Stackexchange\n\nQ: Django - Keeping the original method's work and add new custom validation This is supposed to be a Django-specific, but I guess it's Python anyway.\nBasically, I don't want to override the work of the original method in the class I am inheriting (could be a Model class), but I'd like to add additional validation. Is this possible? Any hint? \nclass MyUserAdminForm(forms.ModelForm):\n class Meta:\n model = User\n\n def clean(self):\n // do some additional work even though it's cleaned by parent's clean method\n\n\nA: Call the super classes clean method:\ndef clean(self): \n super(MyUserAdminForm, self).clean()\n # more cleaning\n\nThis is a common python thing to do when you subclass something and redefine functionaly but want to make sure you keep the super class functionality. Extremely common when you do an init method, as you always need to ensure the super class constructor gets called to set up the instance. \n"]], "corpus": "stackexchange"}
{"tstamp": 1724418780.1003, "task_type": "retrieval", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1724418779.8779, "finish": 1724418780.1003, "ip": "", "conv_id": "ae2b7a1870744e42acf578da232b460d", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": "what is the difference between street art and outdoor murals?", "output": [["what is the difference between street art and outdoor murals?", "English Stackexchange\n\nQ: \"Paintings on walls and ceilings\" and \"painting of portraits, landscapes\" I am creating a portfolio of painter's works and I need to categorize them.\nThere will be two global categories:\n\n\n*\n\n*Paintings on canvas\n\n*Painting on walls and ceilings\n\n\nThe paintings on canvas divide into \"Portraits\", \"Landscapes\", and so on.\nHow should I call paintings of walls and ceilings in English?\nMaybe there is some precise word? I haven't found it, using translators from Russian.\n\nA: They are called murals.\n\nA mural is any piece of artwork painted or applied directly on a wall, ceiling or other large permanent surface. A particularly distinguishing characteristic of mural painting is that the architectural elements of the given space are harmoniously incorporated into the picture.\n\n"]], "corpus": "stackexchange"}
{"tstamp": 1724418801.2335, "task_type": "retrieval", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1724418801.0775, "finish": 1724418801.2335, "ip": "", "conv_id": "f38af7608bb141a68b5a0415db139e28", "model_name": "BAAI/bge-large-en-v1.5", "prompt": "when was the first season of when calls the heart", "output": [["when was the first season of when calls the heart", "When Calls the Heart\n\nThe series was renewed for a second season, which aired from April 25 to June 13, 2015. Hallmark Channel announced in July 2015 that the series had been renewed for a third season, which aired from February 21 to April 10, 2016, with a sneak peek airing during the 2015 Christmas season.\nIn mid-2016, it was announced that Season 4 would premiere on the Hallmark Channel Christmas Day with a two-hour special. On April 11, 2016, Lissing and Krakow announced via the series' Facebook page that Hallmark Channel had renewed the series for a fourth season, which aired from February 19 to April 23, 2017.\nOn April 24, 2017, series star Erin Krakow announced via the Hallmark Channel website that the show would return for a fifth season, which premiered in February 2018 and ended in April. Filming for season five began in Vancouver on August 22, 2017, and ended on December 21, 2017.\nOn March 14. 2019, Hallmark announced it had dropped Loughlin from future company projects due to her role in the 2019 college admissions bribery scandal. On April 10, 2019, it was announced that season six would resume on May 5, 2019, with Loughlin's scenes edited out.\nOn April 13, 2019, the series was renewed for a seventh season which aired February 23 to April 26, 2020.\nOn April 26, 2020, Hallmark Channel announced via a video from Krakow that the series would return for an eighth season, which aired from February 21 to May 9, 2021."]], "corpus": "wikipedia"}
{"tstamp": 1724418801.2335, "task_type": "retrieval", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1724418801.0775, "finish": 1724418801.2335, "ip": "", "conv_id": "5bf232335f6f47b1a04d8962f96e0f6a", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": "when was the first season of when calls the heart", "output": [["when was the first season of when calls the heart", "When Calls the Heart\n\nWhen Calls the Heart is an American television drama series inspired by Janette Oke's book of the same name from her Canadian West series and developed by Michael Landon Jr. The series began airing on the Hallmark Channel in the United States on January 11, 2014, and on Super Channel in Canada on April 16, 2014.\nThe series originated from a two-hour television movie pilot in October 2013, starring Poppy Drayton as young teacher Elizabeth Thatcher and Stephen Amell as North-West Mounted Police officer Wynn Delaney. In the television series Erin Krakow plays the lead role alongside an ensemble cast.\nOn March 21, 2018, Hallmark renewed the series for a sixth season. The season premiered with a two-hour Christmas special that was broadcast as part of Hallmark's Countdown to Christmas event, and was to continue for a 10-episode run starting in February 2019. However, due to the 2019 college admissions bribery scandal involving Lori Loughlin and her subsequent removal from all Hallmark properties, the season was put on a \"retooling\" hiatus and resumed in May to conclude in June, with Loughlin's scenes edited out.\nOn February 22, 2023, ahead of the tenth season premiere, the series was renewed for an eleventh season, which premiered on April 7, 2024. On May 10, 2024, the series was renewed for a twelfth season, which is set to being production in July."]], "corpus": "wikipedia"}
data/sts_battle-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl
CHANGED
@@ -1 +1,4 @@
{"tstamp": 1724398734.7978, "task_type": "sts", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "1d711b509b644755aca5e913c4739322", "0_model_name": "BAAI/bge-large-en-v1.5", "0_txt0": "what is space?", "0_txt1": "space is question.", "0_txt2": "sun is a star.", "0_output": "", "1_conv_id": "13af9479fe36496b9fadf05446820471", "1_model_name": "intfloat/multilingual-e5-large-instruct", "1_txt0": "what is space?", "1_txt1": "space is question.", "1_txt2": "sun is a star.", "1_output": ""}
+
{"tstamp": 1724418611.0287, "task_type": "sts", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "da52e9fc90124175afa39ced4ed5692a", "0_model_name": "jinaai/jina-embeddings-v2-base-en", "0_txt0": "NORAD would receive tracking information for the hijacked aircraft either from joint use radar or from the relevant FAA air traffic control facility.", "0_txt1": "NORAD gets tracking information for planes from joint use radar or the FAA.", "0_txt2": "NORAD gets no tracking information for planes.", "0_output": "", "1_conv_id": "ac842b8c32c04496a5bc0cff58d3c738", "1_model_name": "Salesforce/SFR-Embedding-2_R", "1_txt0": "NORAD would receive tracking information for the hijacked aircraft either from joint use radar or from the relevant FAA air traffic control facility.", "1_txt1": "NORAD gets tracking information for planes from joint use radar or the FAA.", "1_txt2": "NORAD gets no tracking information for planes.", "1_output": ""}
+
{"tstamp": 1724418634.7275, "task_type": "sts", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "b2e43dc0bc5049e0aac990223208ba5c", "0_model_name": "jinaai/jina-embeddings-v2-base-en", "0_txt0": "An old gray-haired man with glasses in a gray t-shirt is working on a sculpture.", "0_txt1": "The man is 10 years old", "0_txt2": "The man is old", "0_output": "", "1_conv_id": "f2b632dfd83c4de5a008c89aab7d0946", "1_model_name": "intfloat/multilingual-e5-large-instruct", "1_txt0": "An old gray-haired man with glasses in a gray t-shirt is working on a sculpture.", "1_txt1": "The man is 10 years old", "1_txt2": "The man is old", "1_output": ""}
+
{"tstamp": 1724418656.8164, "task_type": "sts", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "48c710cb473245488de7e4dffdc5e05d", "0_model_name": "nomic-ai/nomic-embed-text-v1.5", "0_txt0": "Sweden has a significantly higher rate of infant mortality than Indiana.", "0_txt1": "When I found out that the infant mortality rate in Indiana was 86 percent greater than for mothers in Sweden, I was shocked.", "0_txt2": "The news was a huge surprise to me.", "0_output": "", "1_conv_id": "f309d9a668ed410c8d4eeab03e274fe3", "1_model_name": "text-embedding-004", "1_txt0": "Sweden has a significantly higher rate of infant mortality than Indiana.", "1_txt1": "When I found out that the infant mortality rate in Indiana was 86 percent greater than for mothers in Sweden, I was shocked.", "1_txt2": "The news was a huge surprise to me.", "1_output": ""}
data/sts_individual-d18695b3-7a6c-4f0d-9361-0dc8845a980a.jsonl
CHANGED
@@ -4,3 +4,9 @@
{"tstamp": 1724364185.8808, "task_type": "sts", "type": "chat", "model": "text-embedding-3-large", "gen_params": {}, "start": 1724364185.3644, "finish": 1724364185.8808, "ip": "", "conv_id": "64b9ddd5c18f4dfe8667d1db61d7add5", "model_name": "text-embedding-3-large", "txt0": "There's a red bus making a left turn into a traffic circle that has a sprinkler system.", "txt1": "A red bus making a turn", "txt2": "A red bus backing up into a spot", "output": ""}
{"tstamp": 1724398474.2791, "task_type": "sts", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1724398474.2413, "finish": 1724398474.2791, "ip": "", "conv_id": "1d711b509b644755aca5e913c4739322", "model_name": "BAAI/bge-large-en-v1.5", "txt0": "what is space?", "txt1": "space is question.", "txt2": "sun is a star.", "output": ""}
{"tstamp": 1724398474.2791, "task_type": "sts", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1724398474.2413, "finish": 1724398474.2791, "ip": "", "conv_id": "13af9479fe36496b9fadf05446820471", "model_name": "intfloat/multilingual-e5-large-instruct", "txt0": "what is space?", "txt1": "space is question.", "txt2": "sun is a star.", "output": ""}
+
{"tstamp": 1724418600.209, "task_type": "sts", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1724418600.1615, "finish": 1724418600.209, "ip": "", "conv_id": "da52e9fc90124175afa39ced4ed5692a", "model_name": "jinaai/jina-embeddings-v2-base-en", "txt0": "NORAD would receive tracking information for the hijacked aircraft either from joint use radar or from the relevant FAA air traffic control facility.", "txt1": "NORAD gets tracking information for planes from joint use radar or the FAA.", "txt2": "NORAD gets no tracking information for planes.", "output": ""}
+
{"tstamp": 1724418600.209, "task_type": "sts", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1724418600.1615, "finish": 1724418600.209, "ip": "", "conv_id": "ac842b8c32c04496a5bc0cff58d3c738", "model_name": "Salesforce/SFR-Embedding-2_R", "txt0": "NORAD would receive tracking information for the hijacked aircraft either from joint use radar or from the relevant FAA air traffic control facility.", "txt1": "NORAD gets tracking information for planes from joint use radar or the FAA.", "txt2": "NORAD gets no tracking information for planes.", "output": ""}
+
{"tstamp": 1724418620.4873, "task_type": "sts", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1724418620.448, "finish": 1724418620.4873, "ip": "", "conv_id": "b2e43dc0bc5049e0aac990223208ba5c", "model_name": "jinaai/jina-embeddings-v2-base-en", "txt0": "An old gray-haired man with glasses in a gray t-shirt is working on a sculpture.", "txt1": "The man is 10 years old", "txt2": "The man is old", "output": ""}
+
{"tstamp": 1724418620.4873, "task_type": "sts", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1724418620.448, "finish": 1724418620.4873, "ip": "", "conv_id": "f2b632dfd83c4de5a008c89aab7d0946", "model_name": "intfloat/multilingual-e5-large-instruct", "txt0": "An old gray-haired man with glasses in a gray t-shirt is working on a sculpture.", "txt1": "The man is 10 years old", "txt2": "The man is old", "output": ""}
+
{"tstamp": 1724418643.3939, "task_type": "sts", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1724418643.0938, "finish": 1724418643.3939, "ip": "", "conv_id": "48c710cb473245488de7e4dffdc5e05d", "model_name": "nomic-ai/nomic-embed-text-v1.5", "txt0": "Sweden has a significantly higher rate of infant mortality than Indiana.", "txt1": "When I found out that the infant mortality rate in Indiana was 86 percent greater than for mothers in Sweden, I was shocked.", "txt2": "The news was a huge surprise to me.", "output": ""}
+
{"tstamp": 1724418643.3939, "task_type": "sts", "type": "chat", "model": "text-embedding-004", "gen_params": {}, "start": 1724418643.0938, "finish": 1724418643.3939, "ip": "", "conv_id": "f309d9a668ed410c8d4eeab03e274fe3", "model_name": "text-embedding-004", "txt0": "Sweden has a significantly higher rate of infant mortality than Indiana.", "txt1": "When I found out that the infant mortality rate in Indiana was 86 percent greater than for mothers in Sweden, I was shocked.", "txt2": "The news was a huge surprise to me.", "output": ""}