{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# UMAP projection"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "from pathlib import Path\n",
    "import numpy as np\n",
    "\n",
    "from bokeh.plotting import figure, output_file, show, save\n",
    "from bokeh.io import output_notebook, export_png\n",
    "from bokeh.palettes import colorblind\n",
    "from bokeh.models import (\n",
    "    CategoricalColorMapper,\n",
    "    ColumnDataSource,\n",
    "    LassoSelectTool,\n",
    "    WheelZoomTool,\n",
    "    ZoomInTool,\n",
    "    BoxZoomTool,\n",
    "    ResetTool,\n",
    ")\n",
    "from bokeh.layouts import gridplot\n",
    "from bokeh.resources import CDN\n",
    "from bokeh.embed import file_html\n",
    "\n",
    "import umap"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "output_notebook()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load Features datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "TASK = \"subtypes\"\n",
    "DATASET = \"tcga_breast\"\n",
    "MODEL = \"randomForest\"\n",
    "\n",
    "layers = \"gene_cnv_prot\"\n",
    "\n",
    "PATH = Path(\"data\") / DATASET / TASK\n",
    "PATH_RESULTS = Path(\"results\") / DATASET / TASK / MODEL\n",
    "\n",
    "SPLIT = 2  # choose a random split for the train, test, and test2 files"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "file_tr = f\"{PATH}/{SPLIT}/{layers}_tr.txt\"  # Fit UMAP\n",
    "file_test = f\"{PATH}/{SPLIT}/{layers}_ts.txt\"  # test UMAP on TS\n",
    "file_test2 = f\"{PATH}/{SPLIT}/{layers}_ts2.txt\"  # test UMAP on TS2\n",
    "\n",
    "features_train = pd.read_csv(file_tr, sep=\"\\t\", header=0, index_col=0)\n",
    "features_test = pd.read_csv(file_test, sep=\"\\t\", header=0, index_col=0)\n",
    "features_test2 = pd.read_csv(file_test2, sep=\"\\t\", header=0, index_col=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "BEST = False  # restrict the features to the INF signature\n",
    "\n",
    "INF_feats = pd.read_csv(\n",
    "    f\"{PATH_RESULTS}/{SPLIT}/rSNFi/{layers}_ts_RandomForest_KBest_featurelist.txt\",\n",
    "    sep=\"\\t\",\n",
    ")[\"FEATURE_NAME\"].values.tolist()\n",
    "\n",
    "best_train = features_train[INF_feats]\n",
    "best_test = features_test[INF_feats]\n",
    "best_test2 = features_test2[INF_feats]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if BEST:\n",
    "    features_train = best_train\n",
    "    features_test = best_test\n",
    "    features_test2 = best_test2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "samples_tr = features_train.index\n",
    "labels_tr = pd.read_csv(f\"{PATH}/{SPLIT}/labels_{TASK}_tr.txt\", sep=\"\\t\", header=None)[\n",
    "    0\n",
    "].tolist()\n",
    "\n",
    "features_train[\"labels\"] = labels_tr\n",
    "labels_tr = features_train[\"labels\"]\n",
    "features_tr = features_train[features_train.columns[:-1]].values\n",
    "\n",
    "\n",
    "samples_test = features_test.index\n",
    "labels_test = pd.read_csv(\n",
    "    f\"{PATH}/{SPLIT}/labels_{TASK}_ts.txt\", sep=\"\\t\", header=None\n",
    ")[0].tolist()\n",
    "\n",
    "features_test[\"labels\"] = labels_test\n",
    "labels_test = features_test[\"labels\"]\n",
    "features_ts = features_test[features_test.columns[:-1]].values\n",
    "\n",
    "\n",
    "samples_test2 = features_test2.index\n",
    "labels_test2 = pd.read_csv(\n",
    "    f\"{PATH}/{SPLIT}/labels_{TASK}_ts2.txt\", sep=\"\\t\", header=None\n",
    ")[0].tolist()\n",
    "\n",
    "\n",
    "features_test2[\"labels\"] = labels_test2\n",
    "labels_test2 = features_test2[\"labels\"]\n",
    "features_ts2 = features_test2[features_test2.columns[:-1]].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data = features_tr\n",
    "test_data = features_ts\n",
    "test2_data = features_ts2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Check\n",
    "print(len(features_tr), len(samples_tr), len(labels_tr))\n",
    "print(len(features_ts), len(samples_test), len(labels_test))\n",
    "print(len(features_ts2), len(samples_test2), len(labels_test2))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Fit on the training data and transform the test set into the learned space"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mapper = umap.UMAP(\n",
    "    n_neighbors=40, min_dist=0.01, n_components=2, metric=\"euclidean\"\n",
    ").fit(train_data)\n",
    "test_embedding = mapper.transform(test_data)\n",
    "test2_embedding = mapper.transform(test2_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Check\n",
    "len(mapper.embedding_), len(test_embedding), len(test2_embedding)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Plot UMAP 2D projection"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "TOOLTIPS = [\n",
    "    (\"index\", \"$index\"),\n",
    "    (\"(x,y)\", \"($x, $y)\"),\n",
    "    (\"desc\", \"@desc\"),\n",
    "]\n",
    "\n",
    "mycols = colorblind[\"Colorblind\"][4]\n",
    "myclasses = pd.unique(labels_tr).tolist()\n",
    "\n",
    "p = figure(\n",
    "    plot_width=1200,\n",
    "    plot_height=1200,\n",
    "    tooltips=TOOLTIPS,\n",
    "    tools=\"save\",\n",
    "    toolbar_location=\"left\",\n",
    ")\n",
    "\n",
    "\n",
    "p.title.align = \"center\"\n",
    "p.title.text_color = \"black\"\n",
    "p.title.text_font_size = \"25px\"\n",
    "\n",
    "size = 12\n",
    "\n",
    "for col, theclass in zip(mycols, myclasses):\n",
    "\n",
    "    idx_tr = np.where(np.array(labels_tr) == theclass)[0].tolist()\n",
    "    samples_train = np.expand_dims(samples_tr[idx_tr,], axis=1)\n",
    "    data_tr = np.hstack((mapper.embedding_[idx_tr,], samples_train))\n",
    "    df_tr = pd.DataFrame(data_tr, columns=[\"x\", \"y\", \"sample\"])\n",
    "\n",
    "    source_tr = ColumnDataSource(\n",
    "        data=dict(x=df_tr[\"x\"], y=df_tr[\"y\"], desc=df_tr[\"sample\"])\n",
    "    )\n",
    "    p.circle(\n",
    "        x=\"x\",\n",
    "        y=\"y\",\n",
    "        size=size,\n",
    "        source=source_tr,\n",
    "        color=col,\n",
    "        alpha=0.8,\n",
    "        legend=str(theclass),\n",
    "    )\n",
    "\n",
    "    idx_ts = np.where(np.array(labels_test) == theclass)[0].tolist()\n",
    "    samples_ts = np.expand_dims(samples_test[idx_ts,], axis=1)\n",
    "    data_ts = np.hstack((test_embedding[idx_ts,], samples_ts))\n",
    "    df_ts = pd.DataFrame(data_ts, columns=[\"x\", \"y\", \"sample\"])\n",
    "\n",
    "    source_ts = ColumnDataSource(\n",
    "        data=dict(x=df_ts[\"x\"], y=df_ts[\"y\"], desc=df_ts[\"sample\"])\n",
    "    )\n",
    "    p.triangle(x=\"x\", y=\"y\", size=size, source=source_ts, color=col, alpha=0.8)\n",
    "\n",
    "    idx_ts2 = np.where(np.array(labels_test2) == theclass)[0].tolist()\n",
    "    samples_ts2 = np.expand_dims(samples_test2[idx_ts2,], axis=1)\n",
    "    data_ts2 = np.hstack((test2_embedding[idx_ts2,], samples_ts2))\n",
    "    df_ts2 = pd.DataFrame(data_ts2, columns=[\"x\", \"y\", \"sample\"])\n",
    "\n",
    "    source_ts2 = ColumnDataSource(\n",
    "        data=dict(x=df_ts2[\"x\"], y=df_ts2[\"y\"], desc=df_ts2[\"sample\"])\n",
    "    )\n",
    "    p.diamond(x=\"x\", y=\"y\", size=size, source=source_ts2, color=col, alpha=0.8)\n",
    "\n",
    "\n",
    "p.add_tools(LassoSelectTool())\n",
    "p.add_tools(WheelZoomTool())\n",
    "p.legend.label_text_font_size = \"20pt\"\n",
    "p.yaxis.major_label_text_font_size = \"15pt\"\n",
    "p.xaxis.major_label_text_font_size = \"15pt\"\n",
    "\n",
    "\n",
    "p.add_tools(ZoomInTool())\n",
    "p.add_tools(ResetTool())\n",
    "p.add_tools(BoxZoomTool())\n",
    "p.legend.location = \"top_left\"\n",
    "p.legend.click_policy = \"hide\"\n",
    "# p.title()\n",
    "\n",
    "if BEST:\n",
    "    export_png(p, filename=f\"subtypes_INF_split{SPLIT}.png\")  # save the plot\n",
    "else:\n",
    "    export_png(p, filename=f\"subtypes_juXT_split{SPLIT}.png\")\n",
    "\n",
    "show(p)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Grid plot for all other splits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def range_with_ignore(start, stop, ignore):\n",
    "    return np.concatenate([np.arange(start, ignore), np.arange(ignore + 1, stop)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "BEST = False\n",
    "\n",
    "mycols = colorblind[\"Colorblind\"][4]\n",
    "plots = []\n",
    "size = 12\n",
    "\n",
    "for split in range_with_ignore(0, 10, SPLIT).tolist():\n",
    "\n",
    "    file_tr = f\"{PATH}/{split}/{layers}_tr.txt\"  # Fit UMAP\n",
    "    file_test = f\"{PATH}/{split}/{layers}_ts.txt\"  # test UMAP\n",
    "    file_test2 = f\"{PATH}/{split}/{layers}_ts2.txt\"  # test UMAP\n",
    "\n",
    "    features_train = pd.read_csv(file_tr, sep=\"\\t\", header=0, index_col=0)\n",
    "    features_test = pd.read_csv(file_test, sep=\"\\t\", header=0, index_col=0)\n",
    "    features_test2 = pd.read_csv(file_test2, sep=\"\\t\", header=0, index_col=0)\n",
    "\n",
    "    INF_feats = pd.read_csv(\n",
    "        f\"{PATH_RESULTS}/{split}/rSNFi/{layers}_ts_RandomForest_KBest_featurelist.txt\",\n",
    "        sep=\"\\t\",\n",
    "    )[\"FEATURE_NAME\"].values.tolist()\n",
    "\n",
    "    best_train = features_train[INF_feats]\n",
    "    best_test = features_test[INF_feats]\n",
    "    best_test2 = features_test2[INF_feats]\n",
    "\n",
    "    if BEST:\n",
    "        features_train = best_train\n",
    "        features_test = best_test\n",
    "        features_test2 = best_test2\n",
    "\n",
    "    samples_tr = features_train.index\n",
    "    labels_tr = pd.read_csv(\n",
    "        f\"{PATH}/{split}/labels_{TASK}_tr.txt\", sep=\"\\t\", header=None\n",
    "    )[0].tolist()\n",
    "\n",
    "    features_train[\"labels\"] = labels_tr\n",
    "    labels_tr = features_train[\"labels\"]\n",
    "    features_tr = features_train[features_train.columns[:-1]].values\n",
    "\n",
    "    samples_test = features_test.index\n",
    "    labels_test = pd.read_csv(\n",
    "        f\"{PATH}/{split}/labels_{TASK}_ts.txt\", sep=\"\\t\", header=None\n",
    "    )[0].tolist()\n",
    "\n",
    "    features_test[\"labels\"] = labels_test\n",
    "    labels_test = features_test[\"labels\"]\n",
    "    features_ts = features_test[features_test.columns[:-1]].values\n",
    "\n",
    "    samples_test2 = features_test2.index\n",
    "    labels_test2 = pd.read_csv(\n",
    "        f\"{PATH}/{split}/labels_{TASK}_ts2.txt\", sep=\"\\t\", header=None\n",
    "    )[0].tolist()\n",
    "\n",
    "    features_test2[\"labels\"] = labels_test2\n",
    "    labels_test2 = features_test2[\"labels\"]\n",
    "    features_ts2 = features_test2[features_test2.columns[:-1]].values\n",
    "\n",
    "    train_data = features_tr\n",
    "    test_data = features_ts\n",
    "    test2_data = features_ts2\n",
    "\n",
    "    mapper = umap.UMAP(\n",
    "        n_neighbors=40, min_dist=0.01, n_components=2, metric=\"euclidean\"\n",
    "    ).fit(train_data)\n",
    "    test_embedding = mapper.transform(test_data)\n",
    "    test2_embedding = mapper.transform(test2_data)\n",
    "\n",
    "    myclasses = pd.unique(labels_tr).tolist()\n",
    "\n",
    "    p = figure(title=f\"split {split}\")\n",
    "    p.title.text_font_size = \"25pt\"\n",
    "\n",
    "    p.title.align = \"center\"\n",
    "    p.title.text_color = \"black\"\n",
    "    p.title.text_font_size = \"25px\"\n",
    "\n",
    "    for col, theclass in zip(mycols, myclasses):\n",
    "\n",
    "        idx_tr = np.where(np.array(labels_tr) == theclass)[0].tolist()\n",
    "        samples_train = np.expand_dims(samples_tr[idx_tr,], axis=1)\n",
    "        data_tr = np.hstack((mapper.embedding_[idx_tr,], samples_train))\n",
    "        df_tr = pd.DataFrame(data_tr, columns=[\"x\", \"y\", \"sample\"])\n",
    "\n",
    "        source_tr = ColumnDataSource(\n",
    "            data=dict(x=df_tr[\"x\"], y=df_tr[\"y\"], desc=df_tr[\"sample\"])\n",
    "        )\n",
    "        p.circle(\n",
    "            x=\"x\",\n",
    "            y=\"y\",\n",
    "            size=size,\n",
    "            source=source_tr,\n",
    "            color=col,\n",
    "            alpha=0.8,\n",
    "            legend=str(theclass),\n",
    "        )\n",
    "\n",
    "        idx_ts = np.where(np.array(labels_test) == theclass)[0].tolist()\n",
    "        samples_ts = np.expand_dims(samples_test[idx_ts,], axis=1)\n",
    "        data_ts = np.hstack((test_embedding[idx_ts,], samples_ts))\n",
    "        df_ts = pd.DataFrame(data_ts, columns=[\"x\", \"y\", \"sample\"])\n",
    "\n",
    "        source_ts = ColumnDataSource(\n",
    "            data=dict(x=df_ts[\"x\"], y=df_ts[\"y\"], desc=df_ts[\"sample\"])\n",
    "        )\n",
    "        p.triangle(x=\"x\", y=\"y\", size=size, source=source_ts, color=col, alpha=0.8)\n",
    "\n",
    "        idx_ts2 = np.where(np.array(labels_test2) == theclass)[0].tolist()\n",
    "        samples_ts2 = np.expand_dims(samples_test2[idx_ts2,], axis=1)\n",
    "        data_ts2 = np.hstack((test2_embedding[idx_ts2,], samples_ts2))\n",
    "        df_ts2 = pd.DataFrame(data_ts2, columns=[\"x\", \"y\", \"sample\"])\n",
    "\n",
    "        source_ts2 = ColumnDataSource(\n",
    "            data=dict(x=df_ts2[\"x\"], y=df_ts2[\"y\"], desc=df_ts2[\"sample\"])\n",
    "        )\n",
    "        p.diamond(x=\"x\", y=\"y\", size=size, source=source_ts2, color=col, alpha=0.8)\n",
    "\n",
    "    #     p.legend.location = \"bottom_left\"\n",
    "    p.legend.label_text_font_size = \"20pt\"\n",
    "    p.yaxis.major_label_text_font_size = \"15pt\"\n",
    "    p.xaxis.major_label_text_font_size = \"15pt\"\n",
    "    plots.append(p)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "grid = gridplot(\n",
    "    [\n",
    "        [plots[0], plots[1], plots[2]],\n",
    "        [plots[3], plots[4], plots[5]],\n",
    "        [plots[6], plots[7], plots[8]],\n",
    "    ],\n",
    "    plot_width=1200,\n",
    "    plot_height=1200,\n",
    ")\n",
    "\n",
    "if BEST:\n",
    "    export_png(grid, filename=\"subtypes_INF_suppl.png\")\n",
    "else:\n",
    "    export_png(grid, filename=\"subtypes_juXT_suppl.png\")\n",
    "\n",
    "show(grid)"
   ]
  }
 ],
 "metadata": {
  "hide_input": false,
  "kernelspec": {
   "display_name": "Python 3.6.9 64-bit",
   "language": "python",
   "name": "python36964bita08c9a394aa84e7d9622460ca3efcae1"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}