{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "6522ba5f-13ea-419d-ac40-52b75271704c", "metadata": {}, "outputs": [], "source": [ "import re\n", "import json\n", "import pickle\n", "from pathlib import Path\n", "from collections import OrderedDict, defaultdict\n", "from tqdm import tqdm\n", "from utils_sel import *\n", "from bs4 import BeautifulSoup, NavigableString, Tag, Comment\n", "from selenium.webdriver.common.by import By\n", "from selenium.webdriver.support.ui import WebDriverWait\n", "from selenium.webdriver.support import expected_conditions as EC\n", "from selenium.common.exceptions import NoSuchElementException" ] }, { "cell_type": "markdown", "id": "c06982c7-12fe-41d3-831e-c169514c4f70", "metadata": {}, "source": [ "## Local functions" ] }, { "cell_type": "code", "execution_count": null, "id": "7f0927ab-17f1-4e2f-bc9a-df5e3438901c", "metadata": {}, "outputs": [], "source": [ "HOME_URL = \"https://web.meis.ee/testest/goto.php?target=root_1&client_id=integratsioon\"\n", "SAVE_PATH = Path('./estonian_language_exams/')\n", "\n", "def initialize(production=True):\n", " driver = get_driver(production)\n", " driver.get(HOME_URL)\n", " close_popup = driver.find_element(By.ID, value='sb-nav-close')\n", " WebDriverWait(driver, 10).until(EC.element_to_be_clickable(close_popup))\n", " close_popup.click()\n", " return driver\n", "\n", "def get_tests(driver):\n", " tests = driver.find_element(By.CLASS_NAME, 'ilContainerItemsContainer').find_elements(By.TAG_NAME, 'h4')\n", " tests = [elem.find_element(By.TAG_NAME, 'a') for elem in tests]\n", " links = [elem.get_attribute('href') for elem in tests]\n", " names = [elem.text for elem in tests]\n", " return links, names\n", "\n", "def start_test(driver):\n", " click_button(driver, 'cmd[startPlayer]')\n", " click_button(driver, 'cmd[accessCodeConfirmed]')\n", "\n", "def finish_test(driver):\n", " click_link(driver, 'Lõpeta test')\n", " click_link(driver, 'Vaata vastuseid')\n", "\n", "def get_test_table(driver):\n", " table = driver.find_element(By.CLASS_NAME, 'table-striped').find_element(By.TAG_NAME, 'tbody')\n", " questions = table.find_elements(By.TAG_NAME, 'a')\n", " links = [elem.get_attribute('href') for elem in questions]\n", " names = [elem.text for elem in questions]\n", " ids = [int(elem.find_elements(By.TAG_NAME, 'td')[1].text) for elem in table.find_elements(By.TAG_NAME, 'tr')]\n", " return links, names, ids\n", "\n", "def get_answers_single_type(driver):\n", " instruction = driver.find_element(By.CLASS_NAME, 'ilc_heading3_Headline3').text\n", " questions_answers = driver.find_elements(By.CLASS_NAME, 'ilc_question_SingleChoice')\n", " question = questions_answers[0].find_element(By.CLASS_NAME, 'ilc_qtitle_Title').text\n", " answers = questions_answers[1].find_elements(By.CLASS_NAME, 'ilc_qanswer_Answer')\n", " options = [elem.text.strip() for elem in answers]\n", " correct = [i+1 for i, elem in enumerate(answers) if elem.find_element(By.TAG_NAME, 'img').get_attribute('alt')=='Kontrollitud']\n", " assert len((instruction + question).strip()) > 0\n", " assert len(correct) == 1\n", " return instruction, question, options, str(correct[0])\n", "\n", "def get_page_title_html(driver):\n", " page = driver.find_element(By.ID, 'ilc_Page')\n", " page_html = get_html_with_images(driver, page)\n", " page_name = page.find_element(By.CLASS_NAME, 'ilc_page_title_PageTitle').text.strip()\n", " return page_name, page_html\n", "\n", "def toggle_top_bar(driver, visible=False):\n", " script = f\"\"\"\n", " var topBars = 
"        for (var i = 0; i < topBars.length; i++) {{\n",
"            topBars[i].style.display = '{'' if visible else 'none'}';\n",
"            topBars[i].style.visibility = '{'' if visible else 'hidden'}';\n",
"        }}\n",
"    \"\"\"\n",
"    driver.execute_script(script)\n",
"\n",
"def hide_own_answer(driver):\n",
"    questions = driver.find_elements(By.CLASS_NAME, 'ilc_question_Standard')\n",
"    instruction = driver.find_element(By.CLASS_NAME, 'ilc_qtitle_Title')\n",
"    copy_element_as_first_child(driver, instruction, questions[1].find_element(By.XPATH, \"./div\"))\n",
"    hide_between_start_end(driver, questions[0].get_property('parentNode'), 'Sinu vastus oli:', 'Õige vastus on:')\n",
"\n",
"def get_page_screenshot(driver, remove_own=True, as_bytes=True):\n",
"    toggle_top_bar(driver)\n",
"    div = driver.find_element(By.ID, 'ilc_Page')\n",
"    if remove_own:\n",
"        hide_own_answer(driver)\n",
"    im = take_full_div_screenshot(driver, div)\n",
"    if as_bytes:\n",
"        return image_to_bytes(im)\n",
"    return im\n",
"\n",
"def get_first_num_from_title(title):\n",
"    match = re.search(r'\\s(\\d+)\\.\\s', title)\n",
"    return int(match.group(1))\n",
"\n",
"def save_all_questions(driver, save_dict):\n",
"    continue_questions = True\n",
"    while continue_questions:\n",
"        title, html = get_page_title_html(driver)\n",
"        save_dict[title] = {'html_question': html}\n",
"        try:\n",
"            click_link(driver, 'Järgmine')\n",
"        except NoSuchElementException:\n",
"            continue_questions = False\n",
"\n",
"def nums_from_title(title):\n",
"    # Drop the leading number and expand a remaining pair into an inclusive range.\n",
"    matches = re.findall(r'(\\d+)\\.', title)\n",
"    nums = [int(match) for match in matches][1:]\n",
"    if len(nums) == 2:\n",
"        nums = list(range(nums[0], nums[1]+1))\n",
"    return nums\n",
"\n",
"def extract_number_and_text(string):\n",
"    match = re.match(r'^(\\d+)\\.\\s(.+)$', string)\n",
"    if match:\n",
"        return int(match.group(1)), match.group(2)\n",
"    else:\n",
"        return None\n",
"\n",
"def extract_letter_and_text(string):\n",
"    match = re.match(r'^([A-Z]+)\\s+(.+)$', string)\n",
"    if match:\n",
"        return match.group(1), match.group(2)\n",
"    else:\n",
"        return None\n",
"\n",
"def has_repeated(any_list):\n",
"    return len(any_list) != len(set(any_list))\n",
"\n",
"def sep_num_ABC_mult(text):\n",
"    # Split a block of numbered questions, each followed by lettered options,\n",
"    # into (num, question) pairs plus per-question option letters and texts.\n",
"    quests = []\n",
"    opts_dic = defaultdict(list)\n",
"    for elem in text.split('\\n'):\n",
"        if txt := elem.strip():\n",
"            if num_txt := extract_number_and_text(txt):\n",
"                quests.append(num_txt)\n",
"            elif alf_txt := extract_letter_and_text(txt):\n",
"                opts_dic[quests[-1][0]].append(alf_txt)\n",
"    assert all(len(o) > 1 for o in opts_dic.values())\n",
"    opts_list = [opts_dic[q[0]] for q in quests]\n",
"    return quests, [[s[0] for s in ops] for ops in opts_list], [[s[1] for s in ops] for ops in opts_list]"
] }, { "cell_type": "markdown", "id": "149a8bc9-c2f9-44f7-937c-66cb1263cf69", "metadata": {}, "source": [ "## The simple and nicely formatted MQA questions" ] }, { "cell_type": "code", "execution_count": null, "id": "00b91c64-88e0-43fb-860a-7435b260a685", "metadata": { "scrolled": true }, "outputs": [], "source": [
"sd_pkl_path = SAVE_PATH / 'simple_data.pkl'\n",
"if sd_pkl_path.exists():\n",
"    with sd_pkl_path.open('rb') as file:\n",
"        simple_data = pickle.load(file)\n",
"else:\n",
"    simple_data = OrderedDict()\n",
"    driver = initialize()\n",
"    click_link(driver, 'Sõeltestid')\n",
"    tests_links, tests_names = get_tests(driver)\n",
"    for test_link, test_name in zip(tests_links, tests_names):\n",
"        simple_data[test_name] = OrderedDict()\n",
"        driver.get(test_link)\n",
"        start_test(driver)\n",
"        finish_test(driver)\n",
"        links, names, ids = get_test_table(driver)\n",
"        for url, title, id in zip(links, tqdm(names), ids):\n",
"            driver.get(url)\n",
"            instruction, question, options, correct = get_answers_single_type(driver)\n",
"            simple_data[test_name][id] = {\n",
"                \"category_original_lang\": test_name,\n",
"                \"original_question_num\": get_first_num_from_title(title),\n",
"                \"question\": instruction + '\\n' + question,\n",
"                \"options\": options,\n",
"                \"answer\": correct,\n",
"                \"html\": get_page_title_html(driver)[1],\n",
"                \"image\": get_page_screenshot(driver)\n",
"            }\n",
"    driver.close()\n",
"    with sd_pkl_path.open('wb') as file:\n",
"        pickle.dump(simple_data, file)\n",
"    image_list = []\n",
"    for test_name in simple_data.keys():\n",
"        image_list.append(create_image_from_text(test_name))\n",
"        for id in simple_data[test_name].keys():\n",
"            image_list.append(simple_data[test_name][id]['image'])\n",
"    create_pdf_from_images(image_list, SAVE_PATH / 'Sõeltestid.pdf')"
] }, { "cell_type": "markdown", "id": "8dff6bef-8864-415e-ae7f-7cb815c47038", "metadata": {}, "source": [ "## Questions that have varying formatting" ] }, { "cell_type": "markdown", "id": "ba2ebfb2-85d5-45b9-9fb3-cde897019d7c", "metadata": {}, "source": [ "### Saving the pages" ] }, { "cell_type": "code", "execution_count": null, "id": "420321da-016b-4635-acd6-4f816de4e8e1", "metadata": { "scrolled": true }, "outputs": [], "source": [
"cx_pkl_path = SAVE_PATH / 'complex_data.pkl'\n",
"if cx_pkl_path.exists():\n",
"    with cx_pkl_path.open('rb') as file:\n",
"        complex_data = pickle.load(file)\n",
"else:\n",
"    complex_data = OrderedDict()\n",
"    driver = initialize()\n",
"    for complex in ['Diagnoostestid', 'Tasemeeksami näidistestid']:\n",
"        complex_data[complex] = OrderedDict()\n",
"        click_link(driver, complex)\n",
"        tests_links, tests_names = get_tests(driver)\n",
"        for test_link, test_name in zip(tests_links, tests_names):\n",
"            driver.get(test_link)\n",
"            tests_links2, tests_names2 = find_links_names(driver, 'lugemistest')\n",
"            for test_link2, test_name2 in zip(tests_links2, tests_names2):\n",
"                complex_data[complex][test_name2] = OrderedDict()\n",
"                driver.get(test_link2)\n",
"                start_test(driver)\n",
"                save_all_questions(driver, complex_data[complex][test_name2])\n",
"                finish_test(driver)\n",
"                links, names, ids = get_test_table(driver)\n",
"                for url, title, id in zip(links, tqdm(names, desc=test_name2), ids):\n",
"                    driver.get(url)\n",
"                    complex_data[complex][test_name2][title]['html'] = get_page_title_html(driver)[1]\n",
"                    complex_data[complex][test_name2][title]['image'] = get_page_screenshot(driver)\n",
"        driver.get(HOME_URL)\n",
"    driver.close()\n",
"    with cx_pkl_path.open('wb') as file:\n",
"        pickle.dump(complex_data, file)\n",
"    image_list = []\n",
"    for complex in complex_data.keys():\n",
"        image_list.append(create_image_from_text(complex))\n",
"        for test_name2 in complex_data[complex].keys():\n",
"            image_list.append(create_image_from_text(test_name2))\n",
"            for title in complex_data[complex][test_name2].keys():\n",
"                image_list.append(complex_data[complex][test_name2][title]['image'])\n",
"    create_pdf_from_images(image_list, SAVE_PATH / 'complex_data.pdf')"
] }, { "cell_type": "markdown", "id": "816dec76-0ae8-4845-8488-a1dd7366d4c1", "metadata": {}, "source": [ "### Extracting the complex data" ] }, { "cell_type": "code", "execution_count": null, "id": "e90e7309-34b9-473f-a454-bc29c2b37a9e", "metadata": {}, "outputs": [], "source": [
"driver = get_driver()\n",
"body = driver.find_element(By.TAG_NAME, 'body')"
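] }, { "cell_type": "markdown", "id": "a9f3d2c1-5d21-4e0a-9c44-1f2e3d4c5b01", "metadata": {}, "source": [ "The `driver`/`body` pair above backs the `get_text` helper below: a fragment is written into the live page and read back via `body.text`, so the browser (not BeautifulSoup) decides what text is visible. A minimal sketch of why this matters, assuming `driver` is still open; the HTML fragment is hypothetical, for illustration only:" ] }, { "cell_type": "code", "execution_count": null, "id": "a9f3d2c1-5d21-4e0a-9c44-1f2e3d4c5b02", "metadata": {}, "outputs": [], "source": [
"# Hypothetical fragment containing text a browser would hide.\n",
"fragment = '<p>visible<span style=\"display:none\">hidden</span></p>'\n",
"# BeautifulSoup concatenates every text node, hidden or not.\n",
"print(BeautifulSoup(fragment, 'html.parser').get_text())  # -> 'visiblehidden'\n",
"# Rendered in the browser, the hidden span contributes nothing.\n",
"driver.execute_script('document.body.innerHTML=arguments[0]', fragment)\n",
"print(body.text)  # expected -> 'visible'"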
] }, { "cell_type": "markdown", "id": "89e16f3e-5004-4843-bbad-5ac8d40d86af", "metadata": {}, "source": [ "#### Helper funcs for Soup" ] }, { "cell_type": "code", "execution_count": null, "id": "54621a8e-4c2c-49f8-b858-a630573711c3", "metadata": {}, "outputs": [], "source": [ "def get_text(tag: Tag) -> str:\n", " driver.execute_script('document.body.innerHTML=arguments[0]', str(tag))\n", " return body.text\n", "\n", "def get_elements_between(start_element, end_element):\n", " elements_between = []\n", " current = start_element.next_sibling\n", " while current and current != end_element:\n", " if current.name: # Check if it's a tag and not just text\n", " elements_between.append(current)\n", " current = current.next_sibling\n", " return elements_between\n", "\n", "def smart_get_text(element):\n", " if isinstance(element, Tag) and element.name == 'table':\n", " return process_table(element)\n", " text = get_text(element)\n", " tables = element.find_all('table')\n", " for table in tables:\n", " table_text = process_table(table)\n", " text = text.replace(get_text(table), table_text)\n", " return re.sub(r'\\s*\\n\\s*', '\\n', text).strip()\n", "\n", "def process_table(table):\n", " rows = table.find_all('tr')\n", " if len(rows) == 2:\n", " data = defaultdict(list)\n", " headers = []\n", " for i, row in enumerate(rows):\n", " cells = row.find_all(['th', 'td'])\n", " for j, cell in enumerate(cells):\n", " cell_text = get_text(cell).replace('\\n', ' ')\n", " if i == 0:\n", " headers.append(cell_text)\n", " data[cell_text] = [] # Initialize the list for this header\n", " if i > 0 or len(rows) == 1: # Add data for single-row tables too\n", " data[headers[j]].append(cell_text)\n", " table_strings = []\n", " for header, values in data.items():\n", " table_strings.append(f\"{header}:\\n{', '.join(values)}\")\n", " return '\\n'.join(table_strings)\n", " else:\n", " return get_text(table)\n", "\n", "def get_base_question(soup):\n", " instruction = soup.find('h3').getText(strip=True)\n", " question_div = soup.find('div', {'class': 'ilc_question_Standard'})\n", " middle_elems = get_elements_between(soup.find('h3'), question_div)\n", " middle_context = '\\n\\n'.join([smart_get_text(elem) for elem in middle_elems]) + '\\n\\n'\n", " question = soup.find('div', {'class': 'ilc_qtitle_Title'}).getText(strip=True)\n", " return f'{instruction}\\n\\n{middle_context}{question}'\n", "\n", "def get_opt_an_simple(mqas, soup_answer):\n", " options = [get_text(mqa.parent.parent).strip() for mqa in mqas]\n", " correct_div = soup_answer.find_all('div', {'class': 'ilc_question_SingleChoice'})[1]\n", " imgs = correct_div.find_all('img')\n", " answer = [i+1 for i, img in enumerate(imgs) if img.attrs['title']==\"Kontrollitud\"]\n", " assert len(answer) == 1\n", " answer = int(answer[0])\n", " return options, answer\n", "\n", "def get_opt_an_sel1(select_elem, soup_answer):\n", " options = [elem.getText(strip=True) for elem in select_elem(['option']) if int(elem.attrs['value'])>=0]\n", " correct_div = soup_answer.find_all('div', {'class': 'ilc_question_Standard'})[1]\n", " answer = 1 + options.index(correct_div.find('span', {'class': 'solutionbox'}).getText(strip=True))\n", " assert answer > 0\n", " return options, answer\n", "\n", "def get_opt_an_sel_many(soup, soupan):\n", " question_div = soup.find('div', {'class': 'ilc_question_Standard'})\n", " select_elems = question_div.find_all('select')\n", " if len(soup(['img'])) > 0 or len(question_div(['strong'])) > 0:\n", " return [], [], [] # discard questions with images or mult 
completions\n", " parents = [elem.parent for elem in select_elems]\n", " select_elems = [elem.extract() for elem in select_elems]\n", " nums_subques = [extract_number_and_text(s.getText(strip=True)) for s in parents]\n", " if any(x is None for x in nums_subques):\n", " return [], [], [] # discard unexpected formats\n", " ans_div = soupan.find_all('div', {'class': 'ilc_question_Standard'})[1]\n", " ans_txt = [ele.getText(strip=True) for ele in ans_div.find_all('span', {'class': 'solutionbox'})]\n", " opts = [[elem.getText(strip=True) for elem in elems(['option']) if int(elem.attrs['value'])>=0] for elems in select_elems]\n", " anss = [1+ops.index(ans) for ans, ops in zip(ans_txt, opts, strict=True)]\n", " assert any(a>0 for a in anss)\n", " if has_repeated(nums_subques):\n", " nums_subques, opts2, opts_expl = sep_num_ABC_mult(parents[0].getText('\\n', strip=True))\n", " assert opts == opts2\n", " return opts_expl, anss, [s[1] for s in nums_subques]\n", " return opts, anss, [s[1] for s in nums_subques]\n", "\n", "def get_complex(ans_html, que_html):\n", " soup = BeautifulSoup(que_html, 'html.parser')\n", " soupan = BeautifulSoup(ans_html, 'html.parser')\n", " question = get_base_question(soup)\n", " select_elems = soup(['select'])\n", " mqas = soup.find_all('input', {'name': 'multiple_choice_result'})\n", " if len(mqas) > 1:\n", " options, answer = get_opt_an_simple(mqas, soupan)\n", " return [question], [options], [answer]\n", " elif len(select_elems) == 1:\n", " options, answer = get_opt_an_sel1(select_elems[0], soupan)\n", " return [question], [options], [answer]\n", " elif len(select_elems) > 1:\n", " optionss, answers, sub_questions = get_opt_an_sel_many(soup, soupan)\n", " if len(answers) > 0:\n", " return [f'{question}\\n\\n{sub}' for sub in sub_questions], optionss, answers\n", " return [], [], []" ] }, { "cell_type": "markdown", "id": "e6404730-1aec-4ff0-9205-f2fe2f09f295", "metadata": {}, "source": [ "#### Running" ] }, { "cell_type": "code", "execution_count": null, "id": "53fe9935-53bd-4cde-b404-33b208d5cd09", "metadata": {}, "outputs": [], "source": [ "data_list = []\n", "img_list_ok = []\n", "for complex in complex_data.keys():\n", " img_list_ok.append(create_image_from_text(complex))\n", " for test_name2 in complex_data[complex].keys():\n", " img_list_ok.append(create_image_from_text(test_name2))\n", " for title in complex_data[complex][test_name2].keys():\n", " data = complex_data[complex][test_name2][title]\n", " data['category_original_lang'] = test_name2 + ' ' + title\n", " nums = nums_from_title(title)\n", " ques, opts, anss = get_complex(data['html'], data['html_question'])\n", " if len(anss) == 0 or any(len(o) == 1 for op in opts for o in op):\n", " continue\n", " assert len(anss) == len(nums)\n", " img_list_ok.append(data['image'])\n", " for num, que, opt, ans in zip(nums, ques, opts, anss, strict=True):\n", " data_new = data.copy()\n", " data_new['original_question_num'] = num\n", " data_new['question'] = que\n", " data_new['options'] = opt\n", " data_new['answer'] = ans\n", " data_list.append(data_new)\n", "create_pdf_from_images(img_list_ok, SAVE_PATH / 'Diagnoostestid.pdf')" ] }, { "cell_type": "markdown", "id": "cb929ba0-96d1-4ed7-99f4-bb6bfed04ec2", "metadata": {}, "source": [ "## Saving the Final JSON" ] }, { "cell_type": "code", "execution_count": null, "id": "d351f3fa-098d-4679-a62b-a3c65dcf5bb2", "metadata": {}, "outputs": [], "source": [ "final_list = []\n", "questions = set()\n", "base_model = {\n", " \"language\": \"et\",\n", " \"country\": 
\"Estonia\",\n", " \"source\": \"https://web.meis.ee/testest/goto.php?target=root_1&client_id=integratsioon\",\n", " \"license\": \"open\",\n", " \"level\": \"Elementary\",\n", " \"category_en\": \"Language Proficiency Test\",\n", " \"category_original_lang\": \"Eesti keele tasemeeksamid\"\n", "}" ] }, { "cell_type": "code", "execution_count": null, "id": "6c864521-7811-4094-821a-063c8b7cf46d", "metadata": {}, "outputs": [], "source": [ "base_model['file_name'] = 'Sõeltestid.pdf'\n", "for test_name in simple_data.keys():\n", " for id in simple_data[test_name].keys():\n", " base = base_model.copy()\n", " data = simple_data[test_name][id]\n", " base[\"original_question_num\"] = int(data[\"original_question_num\"])\n", " base[\"question\"] = data[\"question\"]\n", " base[\"options\"] = data[\"options\"]\n", " base[\"answer\"] = str(data[\"answer\"])\n", " if not base['question'] in questions:\n", " final_list.append(base)\n", " questions.add(base['question'])" ] }, { "cell_type": "code", "execution_count": null, "id": "b989823b-b0a4-4f1f-a007-543e161b5e1a", "metadata": {}, "outputs": [], "source": [ "base_model['file_name'] = 'Diagnoostestid.pdf'\n", "for data in data_list:\n", " base = base_model.copy()\n", " base[\"original_question_num\"] = int(data[\"original_question_num\"])\n", " base[\"question\"] = data[\"question\"]\n", " base[\"options\"] = data[\"options\"]\n", " base[\"answer\"] = str(data[\"answer\"])\n", " if not base['question'] in questions:\n", " final_list.append(base)\n", " questions.add(base['question'])" ] }, { "cell_type": "code", "execution_count": null, "id": "aa0eec5b-e4b8-4981-ad5c-f483d84d945e", "metadata": {}, "outputs": [], "source": [ "with (SAVE_PATH / 'estonian_language_exams.json').open('w') as file:\n", " json.dump(final_list, file)" ] }, { "cell_type": "code", "execution_count": null, "id": "416e25e7-c15f-40b9-963a-7979f78b82bf", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "491656e7-dfba-4019-9569-f1b32e6d8fa0", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "2ef66610-9c79-4e2e-8369-c5c1bb80aff6", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "f5bcf41f-cd9c-412f-b12f-65e1cda52b9a", "metadata": { "scrolled": true }, "outputs": [], "source": [ "f1, f2, f3 = [], [], []\n", "for i, data in enumerate(tqdm(data_list)):\n", " soup = BeautifulSoup(data['html_question'], 'html.parser')\n", " soupan = BeautifulSoup(data['html'], 'html.parser')\n", " question = get_base_question(soup)\n", " select_elems = soup(['select'])\n", " mqas = soup.find_all('input', {'name': 'multiple_choice_result'})\n", " if len(mqas) > 1:\n", " options, answer = get_opt_an_simple(mqas, soupan)\n", " f1.append((i, question_div, question, options, answer))\n", " elif len(select_elems) == 1:\n", " options, answer = get_opt_an_sel1(select_elems[0], soupan)\n", " f2.append((i, question, options, answer))\n", " elif len(select_elems) > 1:\n", " options, answers, sub_questions = get_opt_an_sel_many(soup, soupan)\n", " if len(answers) > 0:\n", " f3.append((i, question, options, answers, sub_questions))\n", " else:\n", " continue # return [], [], []" ] }, { "cell_type": "code", "execution_count": null, "id": "177ca00c-3e0a-4c47-992e-39b2cd96c2be", "metadata": {}, "outputs": [], "source": [ "f3" ] }, { "cell_type": "code", "execution_count": null, "id": "ff40db8d-ca10-47e8-b2e8-9a9ac82de7fb", "metadata": { "scrolled": true }, "outputs": 
[], "source": [ "t = []\n", "e = []\n", "p = []\n", "p2 = []\n", "n_imgs_strong = 0\n", "for i, data in enumerate(tqdm(data_list)):\n", " soup = BeautifulSoup(data['html_question'], 'html.parser')\n", " soupan = BeautifulSoup(data['html'], 'html.parser')\n", " question_div = soup.find('div', {'class': 'ilc_question_Standard'})\n", " select_elems = question_div.find_all('select')\n", " if len(select_elems) > 1:\n", " \n", " question_div = soup.find('div', {'class': 'ilc_question_Standard'})\n", " select_elems = question_div.find_all('select')\n", " if len(soup(['img'])) > 0 or len(question_div(['strong'])) > 0:\n", " n_imgs_strong += 1\n", " continue\n", " t.append((i, question_div))\n", " parents = [elem.parent for elem in select_elems]\n", " select_elems = [elem.extract() for elem in select_elems]\n", " nums_subques = [extract_number_and_text(s.getText(strip=True)) for s in parents]\n", " if any(x is None for x in nums_subques):\n", " e.append((i, question_div))\n", " continue\n", " ans_div = soupan.find_all('div', {'class': 'ilc_question_Standard'})[1]\n", " ans_txt = [ele.getText(strip=True) for ele in ans_div.find_all('span', {'class': 'solutionbox'})]\n", " opts = [[elem.getText(strip=True) for elem in elems(['option']) if int(elem.attrs['value'])>=0] for elems in select_elems]\n", " anss = [1+ops.index(ans) for ans, ops in zip(ans_txt, opts, strict=True)]\n", " assert any(a>0 for a in anss)\n", " if has_repeated(nums_subques):\n", " nums_subques, opts2, opts_expl = sep_num_ABC_mult(parents[0].getText('\\n', strip=True))\n", " p2.append((i, nums_subques, opts2, opts_expl))\n", " continue\n", " p.append((i, nums_subques, opts))\n", " \n", " # if or len(soup.find_all('input', {'type': 'text'})) > 0:\n", " # continue" ] }, { "cell_type": "code", "execution_count": null, "id": "65922e51-f4af-4a53-986d-5557786d9cb4", "metadata": {}, "outputs": [], "source": [ "n_imgs_strong, len(t), len(e), len(p), len(p2)" ] }, { "cell_type": "code", "execution_count": null, "id": "d1608624-1edf-406c-a9b3-b622fdd23dea", "metadata": {}, "outputs": [], "source": [ "list1 = [['A', 'B', 'C'], ['A', 'B', 'C']]\n", "list2 = [['A', 'B', 'C'], ['A', 'B', 'C']]\n", "list1 == list2" ] }, { "cell_type": "code", "execution_count": null, "id": "72188085-06c3-449b-9269-014e70f67dec", "metadata": {}, "outputs": [], "source": [ "p2" ] }, { "cell_type": "code", "execution_count": null, "id": "8f7f83c3-8514-407a-a36a-e427ea9aa73d", "metadata": {}, "outputs": [], "source": [ "p[0], p2[0]" ] }, { "cell_type": "code", "execution_count": null, "id": "0da63c8a-bbf0-4fbf-920f-d7b6ce435808", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "f282433d-31aa-4f65-8149-9620f841e39c", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "84cb14b2-2de6-4ff2-b062-2e8862ec928a", "metadata": { "scrolled": true }, "outputs": [], "source": [ "insts" ] }, { "cell_type": "code", "execution_count": null, "id": "abc34128-b318-4678-ac6d-863f2672039e", "metadata": {}, "outputs": [], "source": [ "data = data_list[73]\n", "soup = BeautifulSoup(data['html_question'], 'html.parser')\n", "soupan = BeautifulSoup(data['html'], 'html.parser')\n", "question_elems = soup.find_all('div', {'class': 'ilc_qtitle_Title'})\n", "assert len(question_elems) == 1\n", "question = question_elems[0].text.strip()\n", "instruction_elems = soup.find_all('h3')\n", "assert len(instruction_elems) == 1\n", "instruction = instruction_elems[0].text.strip()\n", "middle_context = 
"middle_context = [extract_text_with_linebreaks(elem) for elem in get_elements_between(instruction_elems[0], soup.find('div', {'class': 'ilc_question_Standard'}))]\n",
"select_elems = soup.find_all('select')\n",
"mqas = soup.find_all('input', {'name': 'multiple_choice_result'})"
] }, { "cell_type": "code", "execution_count": null, "id": "2ed0c7d8-928f-4b24-a44c-8d478c245043", "metadata": {}, "outputs": [], "source": [
"for i, data in enumerate(tqdm(data_list)):\n",
"    soup = BeautifulSoup(data['html_question'], 'html.parser')\n",
"    soupan = BeautifulSoup(data['html'], 'html.parser')\n",
"    select_elems = soup.find_all('select')\n",
"    if len(select_elems) > 1:\n",
"        assert len(soup.find_all('div', {'class': 'ilc_qanswer_Answer'}))==1"
] }, { "cell_type": "code", "execution_count": null, "id": "a7894c02-a608-4c66-b9d9-d6459f7eb8e7", "metadata": { "scrolled": true }, "outputs": [], "source": [ "soup" ] }, { "cell_type": "code", "execution_count": null, "id": "55be89c6-538e-454b-aafb-4ab5a4f025a9", "metadata": { "scrolled": true }, "outputs": [], "source": [ "[sou for sou in soup.find('div', {'class': 'ilc_qanswer_Answer'}).find_all('p') if sou.text.strip()]" ] }, { "cell_type": "code", "execution_count": null, "id": "f375b430-a2ea-495c-91a8-640f8dd18903", "metadata": {}, "outputs": [], "source": [ "select_elems" ] }, { "cell_type": "code", "execution_count": null, "id": "4b0e9638-4cc2-44ba-bf62-141cf60b5b7b", "metadata": {}, "outputs": [], "source": [ "soup.find_all('input', {'name': 'multiple_choice_result'})[0]" ] }, { "cell_type": "code", "execution_count": null, "id": "80c056e1-b7c7-4057-aed7-97454a5acfd7", "metadata": { "scrolled": true }, "outputs": [], "source": [ "test" ] }, { "cell_type": "code", "execution_count": null, "id": "7420f15f-7b80-4cc8-bf59-389f94f1f4dc", "metadata": {}, "outputs": [], "source": [ "img" ] }, { "cell_type": "code", "execution_count": null, "id": "5b47197b-23f3-4e73-8935-0e0c2b97c3ca", "metadata": {}, "outputs": [], "source": [
"# CSS classes of interest on the answer pages:\n",
"# ilc_question_SingleChoice\n",
"# ilc_qanswer_Answer"
] }, { "cell_type": "code", "execution_count": null, "id": "dd714667-7de0-462b-94c7-38a6e5e0bdbe", "metadata": { "scrolled": true }, "outputs": [], "source": [ "soupan" ] }, { "cell_type": "code", "execution_count": null, "id": "7e68adfe-b6f0-426a-8219-c60ed84bb9da", "metadata": { "scrolled": true }, "outputs": [], "source": [
"a = get_elements_between(instruction_elems[0], soup.find('div', {'class': 'ilc_question_Standard'}))[0]\n",
"print(get_text(a))"
] }, { "cell_type": "code", "execution_count": null, "id": "38148584-310a-4e22-a4d1-8bff5195c580", "metadata": { "scrolled": true }, "outputs": [], "source": [ "print('\\n\\n'.join(middle_context))" ] }, { "cell_type": "code", "execution_count": null, "id": "98359231-78cf-40e3-9c35-b34583cc70d6", "metadata": {}, "outputs": [], "source": [ "get_elements_between(instruction_elems[0], soup.find('div', {'class': 'ilc_question_Standard'}))" ] }, { "cell_type": "code", "execution_count": null, "id": "10bcdeab-7c3a-4dc5-90c3-5d7d6c022487", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "70870990-7e81-4b83-bafb-849483f6850f", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "3b5f8892-eb6c-43aa-8357-33c420d090c8", "metadata": {}, "outputs": [], "source": [ "from itertools import zip_longest" ] }, { "cell_type": "code", "execution_count": null, "id": "69b94188-ad77-400c-b2cb-7d4f8b57a08d", "metadata": {}, "outputs": [], "source": [
"def print_table_column_wise(html_content):\n",
"    # Parse the HTML content\n",
"    soup = BeautifulSoup(html_content, 'html.parser')\n",
"\n",
"    # Find the table\n",
"    table = soup.find('table')\n",
"\n",
"    if not table:\n",
"        print(\"No table found in the HTML content.\")\n",
"        return\n",
"\n",
"    # Extract headers\n",
"    headers = [th.text.strip() for th in table.find_all('th')]\n",
"\n",
"    # Extract rows\n",
"    rows = []\n",
"    for tr in table.find_all('tr')[1:]:  # Skip the header row\n",
"        rows.append([td.text.strip() for td in tr.find_all('td')])\n",
"\n",
"    # Transpose the data (swap rows and columns)\n",
"    columns = list(zip_longest(*rows, fillvalue=''))\n",
"\n",
"    # Print column-wise\n",
"    for header, column in zip(headers, columns):\n",
"        print(f\"{header}:\")\n",
"        for item in column:\n",
"            print(f\"  {item}\")\n",
"        print()  # Empty line between columns\n",
"\n",
"# Example usage\n",
"html_content = \"\"\"\n",
Name | \n", "Age | \n", "City | \n", "
---|---|---|
John | \n", "30 | \n", "New York | \n", "
Alice | \n", "25 | \n", "Los Angeles | \n", "
Bob | \n", "35 | \n", "Chicago | \n", "