parquet-converter committed
Commit bfddc9b · 1 Parent(s): 67f9dbf

Update parquet files
.gitattributes ADDED
@@ -0,0 +1 @@
+ default/protein_ligand_contacts-train.parquet filter=lfs diff=lfs merge=lfs -text
README.md DELETED
@@ -1,48 +0,0 @@

---
tags:
- molecules
- chemistry
- SMILES
---

## How to use the data sets

This dataset contains more than 16,000 unique pairs of protein sequences and ligand SMILES with experimentally
determined binding affinities and protein-ligand contacts (ligand atom/SMILES token vs. C-alpha within 5 Angstrom).
These are represented by a list that contains the positions of the non-zero elements of the flattened, sparse
sequence x SMILES-token (2048 x 512) matrix. The first and last entries in both dimensions are padded to zero;
they correspond to [CLS] and [SEP].
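
A minimal sketch (illustration, not part of the original card; the names `MAX_SMILES` and `decode_contacts` are made up here) of how the flat indices can be decoded back into (residue, SMILES-token) pairs, assuming the 2048 x 512 padded shape stated above:

```
import numpy as np

MAX_SMILES = 512  # width of the padded sequence x SMILES-token matrix

def decode_contacts(flat_indices):
    # map flattened non-zero positions to (residue, token) index pairs
    flat = np.asarray(flat_indices)
    return np.stack(np.divmod(flat, MAX_SMILES), axis=-1)

print(decode_contacts([6 * 512 + 11]))  # a contact at residue 6, token 11 -> [[ 6 11]]
```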

It can be used for fine-tuning a language model.

The data come solely from PDBbind-CN.

Contacts are calculated at four cut-off distances: 5, 8, 11, and 15 Å.

### Use the already preprocessed data

Load a train/validation split using

```
from datasets import load_dataset
train = load_dataset("jglaser/protein_ligand_contacts", split='train[:90%]')
validation = load_dataset("jglaser/protein_ligand_contacts", split='train[90%:]')
```
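
The loader script in this repository defines the features `seq`, `smiles`, `affinity_uM`, `neg_log10_affinity_M`, `affinity`, and `contacts_{5,8,11,15}A`; a quick sanity check on a loaded split might look like the following sketch (editor-added, not part of the original card):

```
example = train[0]
print(example['seq'][:30], example['smiles'])
print(len(example['contacts_5A']), 'contacts within 5 Angstrom')
```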

### Pre-process yourself

To perform the preprocessing manually, download the data sets from PDBbind-CN.

Register for an account at <https://www.pdbbind.org.cn/>, confirm the validation
email, then log in and download

- the index files (1)
- the general protein-ligand complexes (2)
- the refined protein-ligand complexes (3)

Extract those files in `data/pdbbind`.

Run the script `pdbbind.py` once per cut-off distance in a compute job on an MPI-enabled cluster
(e.g., `mpirun -n 64 python pdbbind.py 5`).

Then perform the steps in the notebook `pdbbind.ipynb`.
 
data/pdbbind_with_contacts.parquet → default/protein_ligand_contacts-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:90b17f9e0253534a0ffd558c231de5d715c5620ab5985fcec59cb7c759f145ee
- size 267149495
+ oid sha256:8483e355645802bbb2f77d40bdde05d42012b52ad2d8951734ac8bec56e6efb4
+ size 205345735
pdbbind.ipynb DELETED
@@ -1,505 +0,0 @@

### Preprocessing

```
import pandas as pd
```

```
df = pd.read_table('data/pdbbind/index/INDEX_general_PL_data.2020',skiprows=4,sep=r'\s+',usecols=[0,4]).drop(0)
df = df.rename(columns={'#': 'name','release': 'affinity'})
df_refined = pd.read_table('data/pdbbind/index/INDEX_refined_data.2020',skiprows=4,sep=r'\s+',usecols=[0,4]).drop(0)
df_refined = df_refined.rename(columns={'#': 'name','release': 'affinity'})
df = pd.concat([df,df_refined])
```

```
quantities = ['ki','kd','ka','k1/2','kb','ic50','ec50']
```

```
from pint import UnitRegistry
ureg = UnitRegistry()

def to_uM(affinity):
    val = ureg(affinity)
    try:
        return val.m_as(ureg.uM)
    except Exception:
        pass

    try:
        return 1/val.m_as(1/ureg.uM)
    except Exception:
        pass
```
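
An aside (editor-added, assuming pint's default unit registry defines molar units): `to_uM` normalizes heterogeneous affinity strings to micromolar, falling back to inverting reciprocal units (e.g., 1/M for association constants) when the direct conversion fails.

```
print(to_uM('10nM'))  # expected: 0.01
print(to_uM('2mM'))   # expected: 2000.0
```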

```
df['affinity_uM'] = df['affinity'].str.split('[=\~><]').str[1].apply(to_uM)
df['affinity_quantity'] = df['affinity'].str.split('[=\~><]').str[0]
```

```
df['affinity_quantity'].hist()
```

[Output: matplotlib histogram of the affinity quantity labels; embedded image omitted]
- {
113
- "cell_type": "code",
114
- "execution_count": 7,
115
- "id": "aa358835-55f3-4551-9217-e76a15de4fe8",
116
- "metadata": {},
117
- "outputs": [],
118
- "source": [
119
- "df_filter = df[df['affinity_quantity'].str.lower().isin(quantities)]\n",
120
- "df_filter = df_filter.dropna()"
121
- ]
122
- },
123
- {
124
- "cell_type": "code",
125
- "execution_count": 8,
126
- "id": "802cb9bc-2563-4d7f-9a76-3be2d9263a36",
127
- "metadata": {},
128
- "outputs": [],
129
- "source": [
130
- "cutoffs = [5,8,11,15]"
131
- ]
132
- },
133
- {
134
- "cell_type": "code",
135
- "execution_count": 9,
136
- "id": "d8e71a8c-11a3-41f0-ab61-3ddc57e10961",
137
- "metadata": {},
138
- "outputs": [],
139
- "source": [
140
- "dfs_complex = {c: pd.read_parquet('data/pdbbind_complex_{}.parquet'.format(c)) for c in cutoffs}"
141
- ]
142
- },
143
- {
144
- "cell_type": "code",
145
- "execution_count": 10,
146
- "id": "ed3fe035-6035-4d39-b072-d12dc0a95857",
147
- "metadata": {},
148
- "outputs": [],
149
- "source": [
150
- "import dask.array as da\n",
151
- "import dask.dataframe as dd\n",
152
- "from dask.bag import from_delayed\n",
153
- "from dask import delayed\n",
154
- "import pyarrow as pa\n",
155
- "import pyarrow.parquet as pq"
156
- ]
157
- },
158
- {
159
- "cell_type": "code",
160
- "execution_count": 11,
161
- "id": "cd26125b-e68b-4fa3-846e-2b6e7f635fe0",
162
- "metadata": {},
163
- "outputs": [
164
- {
165
- "name": "stdout",
166
- "output_type": "stream",
167
- "text": [
168
- "(2046, 510)\n"
169
- ]
170
- }
171
- ],
172
- "source": [
173
- "contacts_dask = [da.from_npy_stack('data/pdbbind_contacts_{}'.format(c)) for c in cutoffs]\n",
174
- "shape = contacts_dask[0][0].shape\n",
175
- "print(shape)"
176
- ]
177
- },
178
- {
179
- "cell_type": "code",
180
- "execution_count": 12,
181
- "id": "9c7c9849-2345-4baf-89e7-d412f52353b6",
182
- "metadata": {},
183
- "outputs": [
184
- {
185
- "data": {
186
- "text/html": [
187
- "<table>\n",
188
- "<tr>\n",
189
- "<td>\n",
190
- "<table>\n",
191
- " <thead>\n",
192
- " <tr><td> </td><th> Array </th><th> Chunk </th></tr>\n",
193
- " </thead>\n",
194
- " <tbody>\n",
195
- " <tr><th> Bytes </th><td> 2.72 GiB </td> <td> 2.72 GiB </td></tr>\n",
196
- " <tr><th> Shape </th><td> (700, 2046, 510) </td> <td> (700, 2046, 510) </td></tr>\n",
197
- " <tr><th> Count </th><td> 25 Tasks </td><td> 1 Chunks </td></tr>\n",
198
- " <tr><th> Type </th><td> float32 </td><td> numpy.ndarray </td></tr>\n",
199
- " </tbody>\n",
200
- "</table>\n",
201
- "</td>\n",
202
- "<td>\n",
203
- "<svg width=\"128\" height=\"195\" style=\"stroke:rgb(0,0,0);stroke-width:1\" >\n",
204
- "\n",
205
- " <!-- Horizontal lines -->\n",
206
- " <line x1=\"10\" y1=\"0\" x2=\"35\" y2=\"25\" style=\"stroke-width:2\" />\n",
207
- " <line x1=\"10\" y1=\"120\" x2=\"35\" y2=\"145\" style=\"stroke-width:2\" />\n",
208
- "\n",
209
- " <!-- Vertical lines -->\n",
210
- " <line x1=\"10\" y1=\"0\" x2=\"10\" y2=\"120\" style=\"stroke-width:2\" />\n",
211
- " <line x1=\"35\" y1=\"25\" x2=\"35\" y2=\"145\" style=\"stroke-width:2\" />\n",
212
- "\n",
213
- " <!-- Colored Rectangle -->\n",
214
- " <polygon points=\"10.0,0.0 35.86269549127143,25.86269549127143 35.86269549127143,145.86269549127144 10.0,120.0\" style=\"fill:#ECB172A0;stroke-width:0\"/>\n",
215
- "\n",
216
- " <!-- Horizontal lines -->\n",
217
- " <line x1=\"10\" y1=\"0\" x2=\"52\" y2=\"0\" style=\"stroke-width:2\" />\n",
218
- " <line x1=\"35\" y1=\"25\" x2=\"78\" y2=\"25\" style=\"stroke-width:2\" />\n",
219
- "\n",
220
- " <!-- Vertical lines -->\n",
221
- " <line x1=\"10\" y1=\"0\" x2=\"35\" y2=\"25\" style=\"stroke-width:2\" />\n",
222
- " <line x1=\"52\" y1=\"0\" x2=\"78\" y2=\"25\" style=\"stroke-width:2\" />\n",
223
- "\n",
224
- " <!-- Colored Rectangle -->\n",
225
- " <polygon points=\"10.0,0.0 52.88780092952726,0.0 78.7504964207987,25.86269549127143 35.86269549127143,25.86269549127143\" style=\"fill:#ECB172A0;stroke-width:0\"/>\n",
226
- "\n",
227
- " <!-- Horizontal lines -->\n",
228
- " <line x1=\"35\" y1=\"25\" x2=\"78\" y2=\"25\" style=\"stroke-width:2\" />\n",
229
- " <line x1=\"35\" y1=\"145\" x2=\"78\" y2=\"145\" style=\"stroke-width:2\" />\n",
230
- "\n",
231
- " <!-- Vertical lines -->\n",
232
- " <line x1=\"35\" y1=\"25\" x2=\"35\" y2=\"145\" style=\"stroke-width:2\" />\n",
233
- " <line x1=\"78\" y1=\"25\" x2=\"78\" y2=\"145\" style=\"stroke-width:2\" />\n",
234
- "\n",
235
- " <!-- Colored Rectangle -->\n",
236
- " <polygon points=\"35.86269549127143,25.86269549127143 78.7504964207987,25.86269549127143 78.7504964207987,145.86269549127144 35.86269549127143,145.86269549127144\" style=\"fill:#ECB172A0;stroke-width:0\"/>\n",
237
- "\n",
238
- " <!-- Text -->\n",
239
- " <text x=\"57.306596\" y=\"165.862695\" font-size=\"1.0rem\" font-weight=\"100\" text-anchor=\"middle\" >510</text>\n",
240
- " <text x=\"98.750496\" y=\"85.862695\" font-size=\"1.0rem\" font-weight=\"100\" text-anchor=\"middle\" transform=\"rotate(-90,98.750496,85.862695)\">2046</text>\n",
241
- " <text x=\"12.931348\" y=\"152.931348\" font-size=\"1.0rem\" font-weight=\"100\" text-anchor=\"middle\" transform=\"rotate(45,12.931348,152.931348)\">700</text>\n",
242
- "</svg>\n",
243
- "</td>\n",
244
- "</tr>\n",
245
- "</table>"
246
- ],
247
- "text/plain": [
248
- "dask.array<blocks, shape=(700, 2046, 510), dtype=float32, chunksize=(700, 2046, 510), chunktype=numpy.ndarray>"
249
- ]
250
- },
251
- "execution_count": 12,
252
- "metadata": {},
253
- "output_type": "execute_result"
254
- }
255
- ],
256
- "source": [
257
- "contacts_dask[0].blocks[1]"
258
- ]
259
- },
260
- {
261
- "cell_type": "code",
262
- "execution_count": 13,
263
- "id": "0bd8e9b9-9713-4572-bd7f-dc47da9fce91",
264
- "metadata": {},
265
- "outputs": [
266
- {
267
- "data": {
268
- "text/plain": [
269
- "[16232, 16228, 16226, 16223]"
270
- ]
271
- },
272
- "execution_count": 13,
273
- "metadata": {},
274
- "output_type": "execute_result"
275
- }
276
- ],
277
- "source": [
278
- "[len(c) for c in contacts_dask]"
279
- ]
280
- },
281
- {
282
- "cell_type": "code",
283
- "execution_count": 14,
284
- "id": "87493934-3839-476a-a975-7da057c320da",
285
- "metadata": {},
286
- "outputs": [
287
- {
288
- "data": {
289
- "text/plain": [
290
- "16232"
291
- ]
292
- },
293
- "execution_count": 14,
294
- "metadata": {},
295
- "output_type": "execute_result"
296
- }
297
- ],
298
- "source": [
299
- "contacts_dask[0].shape[0]"
300
- ]
301
- },
302
- {
303
- "cell_type": "code",
304
- "execution_count": 15,
305
- "id": "42e95d84-ef27-4417-9479-8b356462b8c3",
306
- "metadata": {},
307
- "outputs": [],
308
- "source": [
309
- "import numpy as np\n",
310
- "all_partitions = []\n",
311
- "for c, cutoff in zip(contacts_dask,cutoffs):\n",
312
- " def chunk_to_sparse(rcut, chunk, idx_chunk):\n",
313
- " res = dfs_complex[rcut].iloc[idx_chunk][['name']].copy()\n",
314
- " # pad to account for [CLS] and [SEP]\n",
315
- " res['contacts_{}A'.format(rcut)] = [np.where(np.pad(a,pad_width=(1,1)).flatten())[0] for a in chunk]\n",
316
- " return res\n",
317
- "\n",
318
- " partitions = [delayed(chunk_to_sparse)(cutoff,b,k)\n",
319
- " for b,k in zip(c.blocks, da.arange(c.shape[0],chunks=c.chunks[0:1]).blocks)\n",
320
- " ]\n",
321
- " all_partitions.append(partitions)"
322
- ]
323
- },
324
- {
325
- "cell_type": "code",
326
- "execution_count": 16,
327
- "id": "5520a925-693f-43f0-9e76-df2e128f272e",
328
- "metadata": {},
329
- "outputs": [
330
- {
331
- "data": {
332
- "text/html": [
333
- "<div>\n",
334
- "<style scoped>\n",
335
- " .dataframe tbody tr th:only-of-type {\n",
336
- " vertical-align: middle;\n",
337
- " }\n",
338
- "\n",
339
- " .dataframe tbody tr th {\n",
340
- " vertical-align: top;\n",
341
- " }\n",
342
- "\n",
343
- " .dataframe thead th {\n",
344
- " text-align: right;\n",
345
- " }\n",
346
- "</style>\n",
347
- "<table border=\"1\" class=\"dataframe\">\n",
348
- " <thead>\n",
349
- " <tr style=\"text-align: right;\">\n",
350
- " <th></th>\n",
351
- " <th>name</th>\n",
352
- " <th>contacts_5A</th>\n",
353
- " </tr>\n",
354
- " </thead>\n",
355
- " <tbody>\n",
356
- " <tr>\n",
357
- " <th>0</th>\n",
358
- " <td>10gs</td>\n",
359
- " <td>[3083, 3084, 3086, 3087, 3088, 3089, 3094, 309...</td>\n",
360
- " </tr>\n",
361
- " <tr>\n",
362
- " <th>1</th>\n",
363
- " <td>184l</td>\n",
364
- " <td>[39945, 39946, 39947, 39948, 43010, 43012, 430...</td>\n",
365
- " </tr>\n",
366
- " <tr>\n",
367
- " <th>2</th>\n",
368
- " <td>186l</td>\n",
369
- " <td>[39943, 39944, 39945, 43010, 43011, 43012, 430...</td>\n",
370
- " </tr>\n",
371
- " <tr>\n",
372
- " <th>3</th>\n",
373
- " <td>187l</td>\n",
374
- " <td>[39937, 39938, 39947, 43009, 43010, 43012, 430...</td>\n",
375
- " </tr>\n",
376
- " <tr>\n",
377
- " <th>4</th>\n",
378
- " <td>188l</td>\n",
379
- " <td>[39937, 39938, 39940, 39941, 43009, 43010, 430...</td>\n",
380
- " </tr>\n",
381
- " </tbody>\n",
382
- "</table>\n",
383
- "</div>"
384
- ],
385
- "text/plain": [
386
- " name contacts_5A\n",
387
- "0 10gs [3083, 3084, 3086, 3087, 3088, 3089, 3094, 309...\n",
388
- "1 184l [39945, 39946, 39947, 39948, 43010, 43012, 430...\n",
389
- "2 186l [39943, 39944, 39945, 43010, 43011, 43012, 430...\n",
390
- "3 187l [39937, 39938, 39947, 43009, 43010, 43012, 430...\n",
391
- "4 188l [39937, 39938, 39940, 39941, 43009, 43010, 430..."
392
- ]
393
- },
394
- "execution_count": 16,
395
- "metadata": {},
396
- "output_type": "execute_result"
397
- }
398
- ],
399
- "source": [
400
- "all_partitions[0][0].compute().head()"
401
- ]
402
- },
403
- {
404
- "cell_type": "code",
405
- "execution_count": 17,
406
- "id": "4982c3b1-5ce9-4f17-9834-a02c4e136bc2",
407
- "metadata": {},
408
- "outputs": [],
409
- "source": [
410
- "ddfs = [dd.from_delayed(p) for p in all_partitions]"
411
- ]
412
- },
413
- {
414
- "cell_type": "code",
415
- "execution_count": 18,
416
- "id": "f6cdee43-33c6-445c-8619-ace20f90638c",
417
- "metadata": {},
418
- "outputs": [],
419
- "source": [
420
- "ddf_all = None\n",
421
- "for d in ddfs:\n",
422
- " if ddf_all is not None:\n",
423
- " ddf_all = ddf_all.merge(d, on='name')\n",
424
- " else:\n",
425
- " ddf_all = d\n",
426
- "ddf_all = ddf_all.merge(df_filter,on='name')\n",
427
- "ddf_all = ddf_all.merge(list(dfs_complex.values())[0],on='name')"
428
- ]
429
- },
430
- {
431
- "cell_type": "code",
432
- "execution_count": null,
433
- "id": "8f49f871-76f6-4fb2-b2db-c0794d4c07bf",
434
- "metadata": {},
435
- "outputs": [],
436
- "source": [
437
- "%%time\n",
438
- "df_all_contacts = ddf_all.compute()"
439
- ]
440
- },
441
- {
442
- "cell_type": "code",
443
- "execution_count": null,
444
- "id": "45e4b4fa-6338-4abe-bd6e-8aea46e2a09c",
445
- "metadata": {},
446
- "outputs": [],
447
- "source": [
448
- "df_all_contacts['neg_log10_affinity_M'] = 6-np.log10(df_all_contacts['affinity_uM'])"
449
- ]
450
- },
451
- {
452
- "cell_type": "code",
453
- "execution_count": null,
454
- "id": "7c3db301-6565-4053-bbd4-139bb41dd1c4",
455
- "metadata": {},
456
- "outputs": [],
457
- "source": [
458
- "from sklearn.preprocessing import StandardScaler\n",
459
- "scaler = StandardScaler()\n",
460
- "df_all_contacts['affinity'] = scaler.fit_transform(df_all_contacts['neg_log10_affinity_M'].values.reshape(-1,1))\n",
461
- "scaler.mean_, scaler.var_"
462
- ]
463
- },
464
- {
465
- "cell_type": "code",
466
- "execution_count": null,
467
- "id": "c9d674bb-d6a2-4810-aa2b-e3bc3b4bbc98",
468
- "metadata": {},
469
- "outputs": [],
470
- "source": [
471
- "# save to parquet\n",
472
- "df_all_contacts.drop(columns=['name','affinity_quantity']).astype({'affinity': 'float32','neg_log10_affinity_M': 'float32'}).to_parquet('data/pdbbind_with_contacts.parquet',index=False)"
473
- ]
474
- },
475
- {
476
- "cell_type": "code",
477
- "execution_count": null,
478
- "id": "0e532f31-9157-47e9-be82-86c13565f3da",
479
- "metadata": {},
480
- "outputs": [],
481
- "source": []
482
- }
483
- ],
484
- "metadata": {
485
- "kernelspec": {
486
- "display_name": "Python 3 (ipykernel)",
487
- "language": "python",
488
- "name": "python3"
489
- },
490
- "language_info": {
491
- "codemirror_mode": {
492
- "name": "ipython",
493
- "version": 3
494
- },
495
- "file_extension": ".py",
496
- "mimetype": "text/x-python",
497
- "name": "python",
498
- "nbconvert_exporter": "python",
499
- "pygments_lexer": "ipython3",
500
- "version": "3.9.6"
501
- }
502
- },
503
- "nbformat": 4,
504
- "nbformat_minor": 5
505
- }
 
pdbbind.py DELETED
@@ -1,111 +0,0 @@

```
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor

import warnings
from Bio.PDB import PDBParser, PPBuilder, CaPPBuilder
from Bio.PDB.NeighborSearch import NeighborSearch
from Bio.PDB.Selection import unfold_entities

import numpy as np
import dask.array as da

from rdkit import Chem

import os
import re
import sys

# all punctuation
punctuation_regex = r"""(\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""

# tokenization regex (Schwaller)
molecule_regex = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""

cutoff = int(sys.argv[1])
max_seq = 2046     # = 2048 - 2 (accounting for [CLS] and [SEP])
max_smiles = 510   # = 512 - 2
chunk_size = '1G'

def parse_complex(fn):
    try:
        name = os.path.basename(fn)

        # parse protein sequence and coordinates
        parser = PDBParser()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            structure = parser.get_structure('protein',fn+'/'+name+'_protein.pdb')

        # ppb = PPBuilder()
        ppb = CaPPBuilder()
        seq = []
        for pp in ppb.build_peptides(structure):
            seq.append(str(pp.get_sequence()))
        seq = ''.join(seq)

        # parse ligand, convert to SMILES and map atoms
        suppl = Chem.SDMolSupplier(fn+'/'+name+'_ligand.sdf')
        mol = next(suppl)
        smi = Chem.MolToSmiles(mol)

        # position of atoms in SMILES (not counting punctuation)
        atom_order = [int(s) for s in list(filter(None,re.sub(r'[\[\]]','',mol.GetProp("_smilesAtomOutputOrder")).split(',')))]

        # tokenize the SMILES
        tokens = list(filter(None, re.split(molecule_regex, smi)))

        # remove punctuation
        masked_tokens = [re.sub(punctuation_regex,'',s) for s in tokens]

        k = 0
        token_pos = []
        token_id = []
        for i,token in enumerate(masked_tokens):
            if token != '':
                token_pos.append(tuple(mol.GetConformer().GetAtomPosition(atom_order[k])))
                token_id.append(i)
                k += 1

        # query protein for ligand contacts
        atoms = unfold_entities(structure, 'A')
        neighbor_search = NeighborSearch(atoms)

        close_residues = [neighbor_search.search(center=t, level='R', radius=cutoff) for t in token_pos]
        first_residue = next(structure.get_residues()).get_id()[1]
        residue_id = [[c.get_id()[1]-first_residue for c in query if not c.get_id()[0].startswith(('H','W'))]
                      for query in close_residues]  # zero-based, exclude hetero atoms

        # contact map
        contact_map = np.zeros((max_seq, max_smiles),dtype=np.float32)

        for query,t in zip(residue_id,token_id):
            for r in query:
                contact_map[r,t] = 1

        return name, seq, smi, contact_map
    except Exception as e:
        print(e)
        return None


if __name__ == '__main__':
    import glob

    filenames = glob.glob('data/pdbbind/v2020-other-PL/*')
    filenames.extend(glob.glob('data/pdbbind/refined-set/*'))
    filenames = sorted(filenames)
    comm = MPI.COMM_WORLD
    with MPICommExecutor(comm, root=0) as executor:
        if executor is not None:
            result = executor.map(parse_complex, filenames)
            result = list(result)
            names = [r[0] for r in result if r is not None]
            seqs = [r[1] for r in result if r is not None]
            all_smiles = [r[2] for r in result if r is not None]
            all_contacts = [r[3] for r in result if r is not None]

            import pandas as pd
            df = pd.DataFrame({'name': names, 'seq': seqs, 'smiles': all_smiles})
            all_contacts = da.from_array(all_contacts, chunks=chunk_size)
            da.to_npy_stack('data/pdbbind_contacts_{}/'.format(cutoff), all_contacts)
            df.to_parquet('data/pdbbind_complex_{}.parquet'.format(cutoff))
```
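
As an illustration (editor-added, not part of the original script), the Schwaller tokenization regex above splits a SMILES string into chemically meaningful tokens; `filter(None, ...)` drops the empty strings that `re.split` leaves between adjacent matches:

```
import re

molecule_regex = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""

tokens = list(filter(None, re.split(molecule_regex, 'CC(=O)Oc1ccccc1C(=O)O')))  # aspirin
print(tokens)
# ['C', 'C', '(', '=', 'O', ')', 'O', 'c', '1', 'c', 'c', 'c', 'c', 'c', '1', 'C', '(', '=', 'O', ')', 'O']
```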
 
pdbbind.slurm DELETED
@@ -1,12 +0,0 @@

```
#!/bin/bash
#SBATCH -J preprocess_pdbbind
#SBATCH -p gpu
#SBATCH -A STF006
#SBATCH -t 3:00:00
#SBATCH -N 2
#SBATCH --ntasks-per-node=16

srun python pdbbind.py 5
srun python pdbbind.py 8
srun python pdbbind.py 11
srun python pdbbind.py 15
```
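
For reference, a batch script like this would typically be submitted with `sbatch pdbbind.slurm`; each `srun` line produces one npy contact stack and one parquet file per cut-off, which the notebook above then merges.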
 
protein_ligand_contacts.py DELETED
@@ -1,133 +0,0 @@

```
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A dataset of protein sequences, ligand SMILES, binding affinities and contacts."""

import huggingface_hub
import os
import pyarrow.parquet as pq
import datasets


_CITATION = """\
@InProceedings{huggingface:dataset,
title = {jglaser/protein_ligand_contacts},
author={Jens Glaser, ORNL
},
year={2022}
}
"""

_DESCRIPTION = """\
A dataset to fine-tune language models on protein-ligand binding affinity and contact prediction.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

_LICENSE = "BSD two-clause"

# The HuggingFace datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = "https://huggingface.co/datasets/jglaser/protein_ligand_contacts/resolve/main/"
_data_dir = "data/"
_file_names = {'default': _data_dir+'pdbbind_with_contacts.parquet'}

_URLs = {name: _URL+_file_names[name] for name in _file_names}


class ProteinLigandContacts(datasets.ArrowBasedBuilder):
    """List of protein sequences, ligand SMILES, binding affinities and contacts."""

    VERSION = datasets.Version("1.4.1")

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset
        features = datasets.Features(
            {
                "seq": datasets.Value("string"),
                "smiles": datasets.Value("string"),
                "affinity_uM": datasets.Value("float"),
                "neg_log10_affinity_M": datasets.Value("float"),
                "affinity": datasets.Value("float"),
                "contacts_5A": datasets.Sequence(datasets.Value('int64')),
                "contacts_8A": datasets.Sequence(datasets.Value('int64')),
                "contacts_11A": datasets.Sequence(datasets.Value('int64')),
                "contacts_15A": datasets.Sequence(datasets.Value('int64')),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure
        # with the URLs replaced with paths to local files.
        files = dl_manager.download_and_extract(_URLs)

        return [
            datasets.SplitGenerator(
                # These kwargs will be passed to _generate_tables
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    'filepath': files["default"],
                },
            ),
        ]

    def _generate_tables(self, filepath):
        from pyarrow import fs
        local = fs.LocalFileSystem()

        for i, f in enumerate([filepath]):
            yield i, pq.read_table(f, filesystem=local)
```