1019 lines (1018 with data), 30.7 kB
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Single-cell RNA-seq data on minimal residual disease (MRD) in melanoma\n",
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Learning Outcome\n",
"\n",
"After this session you will be able to load and pre-process your multi-omics data to generate lower-dimensional embeddings. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Quality Control\n",
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Generate lower-dimensional embeddings\n",
"\n",
"We will perform dimensionality reduction and generate lower-dimensional embeddings of the single-cell RNAseq data using two methods:\n",
"* PCA (https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)\n",
"* Neural Networks (https://pytorch.org/tutorials/beginner/basics/buildmodel_tutorial.html)\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"import seaborn as sns \n",
"sns.set_style('dark')\n",
"\n",
"# ====== Scikit-learn imports ======\n",
"\n",
"from sklearn.svm import SVC\n",
"from sklearn.metrics import (\n",
" auc,\n",
" roc_curve,\n",
" ConfusionMatrixDisplay,\n",
" f1_score,\n",
" balanced_accuracy_score,\n",
")\n",
"from sklearn.preprocessing import StandardScaler, LabelBinarizer\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.decomposition import PCA\n",
"\n",
"## ====== Torch imports ======\n",
"import torch\n",
"import torch.nn as nn \n",
"import torch.nn.functional as F\n",
"import torch.optim as optim\n",
"from lightning.pytorch.utilities.types import OptimizerLRScheduler\n",
"import torch.utils.data\n",
"from torch.utils.data import TensorDataset, DataLoader\n",
"\n",
"import lightning, lightning.pytorch.loggers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Read data"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(369, 2002)\n"
]
}
],
"source": [
"df = pd.read_csv(\"../data/GSE116237_forQ.csv\")\n",
"print(df.shape)\n",
"labels = pd.read_csv(\"../data/GSE116237 filtered labels.csv\")\n",
"labels.drop('Unnamed: 0', axis=1, inplace=True)\n",
"labels.columns = ['Cells', 'Labels']\n",
"# Take an explicit copy so the assignment below writes to an independent\n",
"# frame instead of a view of `labels` (fixes the SettingWithCopyWarning).\n",
"labels_filtered = labels[labels['Cells'].isin(df['Cells'])].copy()\n",
"# Labels look like '<prefix> T0' / '<prefix> phase2'; keep the second token.\n",
"labels_filtered['Labels'] = [x.split(' ')[1] for x in labels_filtered['Labels']]\n",
"df = pd.merge(df, labels_filtered, on='Cells', how='inner')\n",
"df['Labels'] = df['Labels'].map({'T0': 0, 'phase2': 1})\n",
"y = np.array(df['Labels'])\n",
"X = df[df.columns[1:-1]].values\n",
"\n",
"num_samples = X.shape[0]\n",
"num_feats = X.shape[1]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Split into training and testing"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Hold out 20% of the cells for final evaluation. random_state makes the\n",
"# split reproducible; stratify preserves the class balance in both splits.\n",
"X_working, X_held_out, y_working, y_held_out = train_test_split(X,\n",
"                                                                y,\n",
"                                                                train_size=0.8,\n",
"                                                                shuffle=True,\n",
"                                                                stratify=y,\n",
"                                                                random_state=42)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### PCA"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Training data dimensions: (295, 2001)\n",
"Embedded train dimensions: (295, 5)\n",
"Testing data dimensions: (74, 2001)\n",
"Embedded test dimensions: (74, 5)\n"
]
}
],
"source": [
"output_dim = 5\n",
"pca = PCA(n_components=output_dim)\n",
"scaler = StandardScaler()\n",
"# Fit the scaler and the PCA on the training split only ...\n",
"X_train_scaled = scaler.fit_transform(X_working)\n",
"embedding_train = pca.fit_transform(X_train_scaled)\n",
"# ... then apply the already-fitted transforms to the held-out split.\n",
"# Re-fitting on the test data (fit_transform) leaks test-set statistics and\n",
"# puts the test embeddings in a different PCA basis than the training ones.\n",
"X_test_scaled = scaler.transform(X_held_out)\n",
"embedding_test = pca.transform(X_test_scaled)\n",
"print(\"Training data dimensions: \", X_working.shape)\n",
"print(\"Embedded train dimensions: \", embedding_train.shape)\n",
"print(\"Testing data dimensions: \", X_held_out.shape)\n",
"print(\"Embedded test dimensions: \", embedding_test.shape)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Neural Networks"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Define the network in Pytorch lightning"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"embedder = torch.nn.Sequential(\n",
"    nn.Linear(num_feats, 256),\n",
"    nn.LeakyReLU(),\n",
"    nn.Linear(256, 64),\n",
"    nn.LeakyReLU(),\n",
"    nn.Linear(64, 32),\n",
"    nn.LeakyReLU(),\n",
"    nn.Linear(32, 16),\n",
"    nn.LeakyReLU(),\n",
"    nn.Linear(16, output_dim),\n",
"    nn.LeakyReLU(),\n",
")\n",
"\n",
"# Single-unit binary head. NOTE: the original used nn.Softmax(dim=1) here,\n",
"# but softmax over a single output is identically 1.0, so the BCE gradient\n",
"# is zero and nothing can be learned. Sigmoid yields a proper probability\n",
"# compatible with nn.BCELoss.\n",
"classifier = torch.nn.Sequential(\n",
"    nn.Linear(output_dim, 1),\n",
"    nn.Sigmoid(),\n",
")\n",
"\n",
"class BinaryClassifierModel(lightning.LightningModule):\n",
"    \"\"\"Lightning module: embedder -> classifier, trained with BCE loss.\"\"\"\n",
"\n",
"    def __init__(self, embedder, classifier, input_dim, learning_rate=1e-3):\n",
"        super().__init__()\n",
"        self.input_dim = input_dim\n",
"        self.embedder = embedder\n",
"        self.classifier = classifier\n",
"        self.learning_rate = learning_rate\n",
"        self.loss_fun = nn.BCELoss()\n",
"\n",
"    def train_dataloader(self):\n",
"        return DataLoader(self.train_data, batch_size=32, shuffle=True)\n",
"\n",
"    def val_dataloader(self):\n",
"        return DataLoader(self.val_data, batch_size=32)  # no shuffling for validation\n",
"\n",
"    def forward(self, X):\n",
"        x = self.embedder(X)\n",
"        x = self.classifier(x)\n",
"        return x\n",
"\n",
"    def training_step(self, batch, batch_idx):\n",
"        x, y = batch\n",
"        y_float = y.unsqueeze(1).float()  # (batch,) -> (batch, 1) to match y_hat\n",
"        y_hat = self.classifier(self.embedder(x))\n",
"        loss = self.loss_fun(y_hat, y_float)\n",
"        self.log(\"train_loss\", loss,\n",
"                 prog_bar=True,\n",
"                 logger=True)\n",
"        return loss\n",
"\n",
"    def validation_step(self, batch, batch_idx):\n",
"        x, y = batch\n",
"        y = y.unsqueeze(1)\n",
"        y_hat = self.classifier(self.embedder(x))\n",
"        val_loss = self.loss_fun(y_hat, y.float())\n",
"        # sklearn's f1_score expects (y_true, y_pred) with discrete labels,\n",
"        # so threshold the probabilities first (the original passed raw\n",
"        # probability tensors, and in (pred, true) order).\n",
"        preds = (y_hat > 0.5).long()\n",
"        f1score = f1_score(y.cpu().numpy(), preds.cpu().numpy())\n",
"        self.log(\"val_loss\", val_loss, prog_bar=False, logger=True)\n",
"        self.log(\"val_f1\", f1score, prog_bar=False, logger=True)\n",
"        return val_loss\n",
"\n",
"    def configure_optimizers(self):\n",
"        # Optimize *all* parameters. The original passed only\n",
"        # self.classifier.parameters(), which left the embedder frozen.\n",
"        return torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n",
"\n",
"def prepare_data(X_train, y_train, X_val, y_val):\n",
"    \"\"\"Wrap NumPy arrays into TensorDatasets for the train/val dataloaders.\"\"\"\n",
"    train_data = TensorDataset(torch.tensor(X_train, dtype=torch.float32),\n",
"                               torch.tensor(y_train, dtype=torch.float32))\n",
"    val_data = TensorDataset(torch.tensor(X_val, dtype=torch.float32),\n",
"                             torch.tensor(y_val, dtype=torch.float32))\n",
"    return train_data, val_data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Compute the embeddings using the network and get lower dimension embeddings in the form of matrices for downstream QML tasks"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"GPU available: False, used: False\n",
"TPU available: False, using: 0 TPU cores\n",
"HPU available: False, using: 0 HPUs\n",
"2024-07-12 13:12:26.638257: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
"2024-07-12 13:12:26.651571: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:479] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2024-07-12 13:12:26.671424: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:10575] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2024-07-12 13:12:26.671449: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1442] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2024-07-12 13:12:26.684350: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
"To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2024-07-12 13:12:27.415951: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"\n",
" | Name | Type | Params | Mode \n",
"--------------------------------------------------\n",
"0 | embedder | Sequential | 531 K | train\n",
"1 | classifier | Sequential | 6 | train\n",
"2 | loss_fun | BCELoss | 0 | train\n",
"--------------------------------------------------\n",
"531 K Trainable params\n",
"0 Non-trainable params\n",
"531 K Total params\n",
"2.127 Total estimated model params size (MB)\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "afd281a3a6ac4be6ae161d27f15c8e56",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Sanity Checking: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/abose/QMLOmics/SessionIII_MultiOmics/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:424: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=191` in the `DataLoader` to improve performance.\n",
"/home/abose/QMLOmics/SessionIII_MultiOmics/.venv/lib/python3.10/site-packages/lightning/pytorch/trainer/connectors/data_connector.py:424: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=191` in the `DataLoader` to improve performance.\n",
"/home/abose/QMLOmics/SessionIII_MultiOmics/.venv/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py:298: The number of training batches (9) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "2feb697f40ce4fe1a7d2f98f2e328f92",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Training: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1866594d0bde4a43aa631377b0d8df6b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "50fbcc83f53f442cbad46e38913a218b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "41d8b1ed01d2431696fce99afae9dc71",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "902026fc5c1347b1b4f150d49313c067",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "26bfff9443aa4a2697c707c06b246e2a",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "eaa3399fcdf147bf84b357300828c816",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1fbaed0d8f4f4575ad03ad4dd78fcd9e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1d38ac2789784e20a612a71e38fda98f",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "3ed88b70b68e4421b9be0774799d7a11",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "410e31b0fc8b4947b617d00d181ed5fe",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "3c809906d16c4181adbf366e46774a23",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "9a2c0a33fb524a599cb094dc3a690484",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "0cb0d96880a64b4d94c86ab36f4029cd",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ae97bce46fbe4f128928d3d06ce481b0",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b35824fd78e843dd9cd332a139636f4c",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "5da6734d8a90481a9463bfce99e5f786",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6b36092d92594a59967e21f0ff258005",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e9f9bd47d5d64909916996ee563c55ce",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "a309af8ebdb043f686d7fca5002b7fc5",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "5916500305484ddba658d94c139f0ce2",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "29397c5350bd4fcca1e3e6ec15289f16",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "0b973052d23d4633b7ad8c857ecab896",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "800621fcc3d447c9bb9aae4612469fe3",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "589ab988ed3a4a9191e91ceeffd95bbb",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "8ab7fdd5b30a4d78a61037b76843c883",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6953b322b6bb48989d80fcd563914d96",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b9695946233d4e1d9f67e68bf459b2a4",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "667a451df9eb49ceaece4de0780fca12",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "a938f5b709934ed6a3c46843276d9d19",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "21272ee9f2934f699b97b3cc84f90eef",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "949bd80e3bb9469e8496eb333c4efa79",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "8f06b68975cb4cdea69328626b62d0ce",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "2d4486c1d5094d419cea9b4249ee67ba",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "43f06e9af03549d48c56074332d1b0ce",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "4680640dc1904e7d87c1d02cb817b8bf",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "92ef38a4c8314805a3c27ed94929b46f",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "5b65eb5da1ad451fb01235fc22d83478",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "46cf08a04d00430cb780c555189c56aa",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "19dc01567a744838a98b2ae6ea833c11",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "d4e9d6f7d8214b47ad0beb604847d952",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Validation: | | 0/? [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"`Trainer.fit` stopped: `max_epochs=40` reached.\n"
]
}
],
"source": [
"f1s = []\n",
"embeddings_train = []\n",
"embeddings_test = []\n",
"train_labels = []\n",
"test_labels = []\n",
"\n",
"num_iter = 1\n",
"for i in range(num_iter):\n",
"\n",
"    # NOTE: `embedder`/`classifier` are constructed once in the cell above, so\n",
"    # with num_iter > 1 every iteration keeps training the *same* weights --\n",
"    # re-create them inside the loop for truly independent runs.\n",
"    X_train, X_test, y_train, y_test = train_test_split(X_working,\n",
"                                                        y_working,\n",
"                                                        train_size=0.9,\n",
"                                                        shuffle=True)\n",
"\n",
"    num_epochs = 40\n",
"    model = BinaryClassifierModel(embedder, classifier, input_dim=num_feats)\n",
"    model.train_data, model.val_data = prepare_data(X_train, y_train, X_test, y_test)  # Prepare data for training\n",
"    logger = lightning.pytorch.loggers.TensorBoardLogger(save_dir=\".\", name=\"original_classifier\")\n",
"    # Train the model\n",
"    trainer = lightning.Trainer(max_epochs=num_epochs,\n",
"                                logger=logger)\n",
"    trainer.fit(model)\n",
"\n",
"    # Inference only: eval mode + no_grad avoids building autograd graphs.\n",
"    model.eval()\n",
"    with torch.no_grad():\n",
"        embedded_test = model.embedder(torch.tensor(X_test, dtype=torch.float32))\n",
"        y_pred = model.classifier(embedded_test)\n",
"        embedded_train = model.embedder(torch.tensor(X_train, dtype=torch.float32))\n",
"\n",
"    y_pred_class = np.round(y_pred.cpu().numpy())  # threshold probabilities at 0.5\n",
"    f1s.append(f1_score(y_test, y_pred_class))\n",
"\n",
"    embeddings_train.append(embedded_train.cpu().numpy())\n",
"    embeddings_test.append(embedded_test.cpu().numpy())\n",
"    train_labels.append(y_train)\n",
"    test_labels.append(y_test)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Save the embeddings"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"fname = \"MRD\"\n",
"out_dir = f\"checkpoints/{fname}\"\n",
"os.makedirs(out_dir, exist_ok=True)  # np.save fails if the directory is missing\n",
"\n",
"# One loop instead of four copy-pasted ones; the file names are unchanged,\n",
"# e.g. checkpoints/MRD/MRD_iter0_train_embedding.npy\n",
"arrays_by_suffix = {\n",
"    \"train_embedding\": embeddings_train,\n",
"    \"train_labels\": train_labels,\n",
"    \"test_embedding\": embeddings_test,\n",
"    \"test_labels\": test_labels,\n",
"}\n",
"for suffix, arrays in arrays_by_suffix.items():\n",
"    for i, x in enumerate(arrays):\n",
"        np.save(f\"{out_dir}/{fname}_iter{i}_{suffix}\", x)\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}