WCEBleedGen.ipynb

{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "authorship_tag": "ABX9TyOK/tfXqNBMTnLqQ6UmtznZ",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/2004ARYAN/WCEBleedGen-AI/blob/main/WCEBleedGen.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "id": "6-HEO5ZZjkJl"
      },
      "outputs": [],
      "source": [
        "import zipfile"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Path to the training dataset zip file\n",
        "train_dataset_path = '/content/sample_data/WCEBleedGen.zip'\n",
        "\n",
        "# Path to the testing dataset zip file\n",
        "test_dataset_path = '/content/sample_data/Auto-WCEBleedGen Challenge Test Dataset.zip'\n",
        "\n",
        "# Extract the training dataset\n",
        "with zipfile.ZipFile(train_dataset_path, 'r') as zip_ref:\n",
        "    zip_ref.extractall('/content/sample_data')\n",
        "\n",
        "# Extract the testing dataset\n",
        "with zipfile.ZipFile(test_dataset_path, 'r') as zip_ref:\n",
        "    zip_ref.extractall('/content/sample_data')"
      ],
      "metadata": {
        "id": "m5dvbzIJjnFn"
      },
      "execution_count": 2,
      "outputs": []
    },
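    {
      "cell_type": "code",
      "source": [
        "# Sketch (not part of the original notebook): list the extracted contents to\n",
        "# confirm the directory layout before wiring up the dataset paths below.\n",
        "# The folder names used later are assumptions based on the zip file names above.\n",
        "import os\n",
        "\n",
        "for entry in sorted(os.listdir('/content/sample_data')):\n",
        "    print(entry)\n"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },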
    {
      "cell_type": "code",
      "source": [
        "# Organize the data directories\n",
        "train_data_dir = '/content/sample_data/train_dataset'\n",
        "test_data_dir = '/content/sample_data/test_dataset'\n"
      ],
      "metadata": {
        "id": "zqm-6MH9jnH7"
      },
      "execution_count": 3,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "\n",
        "# Set the path to the extracted training dataset directory\n",
        "extracted_data_dir = '/content/sample_data/WCEBleedGen'\n",
        "\n",
        "# Define the path to your training and testing data within the extracted directory\n",
        "train_data_dir = os.path.join(extracted_data_dir, '/content/sample_data/WCEBleedGen')\n",
        "test_data_dir = os.path.join(extracted_data_dir, '/content/sample_data/Auto-WCEBleedGen Challenge Test Dataset')\n"
      ],
      "metadata": {
        "id": "LQ7GMenHjnM4"
      },
      "execution_count": 6,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "import tensorflow as tf\n",
        "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
        "\n",
        "\n",
        "\n",
        "# Create data generators for training and testing\n",
        "train_datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)\n",
        "train_generator = train_datagen.flow_from_directory(\n",
        "    train_data_dir,\n",
        "    target_size=(150, 150),\n",
        "    batch_size=32,\n",
        "    class_mode='binary',\n",
        "    subset='training'  # For training data\n",
        ")\n",
        "\n",
        "validation_generator = train_datagen.flow_from_directory(\n",
        "    train_data_dir,\n",
        "    target_size=(150, 150),\n",
        "    batch_size=32,\n",
        "    class_mode='binary',\n",
        "    subset='validation'  # For validation data\n",
        ")\n",
        "\n",
        "\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "l6dPYUqAjnPO",
        "outputId": "8465f9e5-ce7f-4ed2-a637-9052bf110b2e"
      },
      "execution_count": 8,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Found 4190 images belonging to 2 classes.\n",
            "Found 1046 images belonging to 2 classes.\n"
          ]
        }
      ]
    },
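    {
      "cell_type": "code",
      "source": [
        "# Sketch (not part of the original notebook): inspect the label mapping that\n",
        "# flow_from_directory inferred from the class sub-folders, so the model's\n",
        "# sigmoid output (0 vs 1) can be interpreted consistently later on.\n",
        "print('Class indices:', train_generator.class_indices)\n",
        "print('Training samples:', train_generator.samples)\n",
        "print('Validation samples:', validation_generator.samples)\n"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },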
    {
      "cell_type": "code",
      "source": [
        "import tensorflow as tf\n",
        "from tensorflow.keras.models import Sequential\n",
        "from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\n",
        "from tensorflow.keras.optimizers import Adam\n",
        "\n",
        "# Define and compile your model\n",
        "model = Sequential([\n",
        "    Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),\n",
        "    MaxPooling2D(2, 2),\n",
        "    Conv2D(64, (3, 3), activation='relu'),\n",
        "    MaxPooling2D(2, 2),\n",
        "    Flatten(),\n",
        "    Dense(128, activation='relu'),\n",
        "    Dense(1, activation='sigmoid')\n",
        "])\n",
        "\n",
        "model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])\n",
        "\n",
        "# Train your model\n",
        "history = model.fit(\n",
        "    train_generator,\n",
        "    steps_per_epoch=len(train_generator),\n",
        "    epochs=10,\n",
        "    validation_data=validation_generator,\n",
        "    validation_steps=len(validation_generator)\n",
        ")\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "b5DsWJjI1cao",
        "outputId": "955fc3b8-8216-4220-f95d-5c5da2fbed28"
      },
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Epoch 1/10\n",
            "131/131 [==============================] - 181s 1s/step - loss: 0.3123 - accuracy: 0.8814 - val_loss: 0.0428 - val_accuracy: 0.9895\n",
            "Epoch 2/10\n",
            "131/131 [==============================] - 160s 1s/step - loss: 0.0743 - accuracy: 0.9737 - val_loss: 0.0466 - val_accuracy: 0.9847\n",
            "Epoch 3/10\n",
            "131/131 [==============================] - 171s 1s/step - loss: 0.0316 - accuracy: 0.9883 - val_loss: 0.1191 - val_accuracy: 0.9713\n",
            "Epoch 4/10\n",
            "131/131 [==============================] - 158s 1s/step - loss: 0.0283 - accuracy: 0.9928 - val_loss: 0.1047 - val_accuracy: 0.9818\n",
            "Epoch 5/10\n",
            "131/131 [==============================] - 159s 1s/step - loss: 0.0123 - accuracy: 0.9976 - val_loss: 0.0974 - val_accuracy: 0.9837\n",
            "Epoch 6/10\n",
            "131/131 [==============================] - 161s 1s/step - loss: 0.0023 - accuracy: 0.9995 - val_loss: 0.1689 - val_accuracy: 0.9771\n",
            "Epoch 7/10\n",
            "131/131 [==============================] - 161s 1s/step - loss: 0.0018 - accuracy: 0.9998 - val_loss: 0.0591 - val_accuracy: 0.9895\n",
            "Epoch 8/10\n",
            "131/131 [==============================] - 159s 1s/step - loss: 0.0171 - accuracy: 0.9947 - val_loss: 0.0362 - val_accuracy: 0.9914\n",
            "Epoch 9/10\n",
            "131/131 [==============================] - 158s 1s/step - loss: 0.0045 - accuracy: 0.9986 - val_loss: 0.0446 - val_accuracy: 0.9914\n",
            "Epoch 10/10\n",
            "131/131 [==============================] - 169s 1s/step - loss: 0.0492 - accuracy: 0.9862 - val_loss: 0.0800 - val_accuracy: 0.9895\n"
          ]
        }
      ]
    },
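    {
      "cell_type": "code",
      "source": [
        "# Sketch (not part of the original notebook): report the final validation\n",
        "# loss/accuracy with model.evaluate and keep a copy of the trained weights.\n",
        "# The file name 'wcebleedgen_cnn.h5' is an arbitrary choice, not from the source.\n",
        "val_loss, val_accuracy = model.evaluate(validation_generator, steps=len(validation_generator))\n",
        "print(f'Validation loss: {val_loss:.4f}, validation accuracy: {val_accuracy:.4f}')\n",
        "\n",
        "model.save('wcebleedgen_cnn.h5')\n"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },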
    {
      "cell_type": "code",
      "source": [
        "test_data_dir = '/content/sample_data/Auto-WCEBleedGen Challenge Test Dataset'  # Update with the path to your testing dataset\n",
        "test_datagen = ImageDataGenerator(rescale=1./255)\n",
        "test_generator = test_datagen.flow_from_directory(\n",
        "    test_data_dir,\n",
        "    target_size=(150, 150),\n",
        "    batch_size=32,\n",
        "    class_mode='binary'\n",
        ")\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "-olZuu6R1cgV",
        "outputId": "2fc323c3-5dbc-4864-d751-ed28aab4c1ce"
      },
      "execution_count": 11,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Found 564 images belonging to 2 classes.\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "predictions = model.predict(test_generator)\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "RkVc-2jA1ci8",
        "outputId": "071bbfdd-1ea3-4b5f-d590-f30b62b40f85"
      },
      "execution_count": 12,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "18/18 [==============================] - 7s 385ms/step\n"
          ]
        }
      ]
    },
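    {
      "cell_type": "code",
      "source": [
        "# Sketch (not part of the original notebook): turn the sigmoid outputs into hard\n",
        "# class labels and pair them with the corresponding file names. This relies on\n",
        "# shuffle=False in the test generator above so that prediction order matches\n",
        "# test_generator.filenames; the 0.5 threshold is an assumption, not tuned.\n",
        "predicted_classes = (predictions.ravel() > 0.5).astype(int)\n",
        "for filename, label, score in list(zip(test_generator.filenames, predicted_classes, predictions.ravel()))[:10]:\n",
        "    print(f'{filename}: class {label} (score {score:.3f})')\n"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },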
    {
      "cell_type": "code",
      "source": [
        "import pandas as pd\n",
        "import smtplib\n",
        "\n",
        "\n",
        "# Step 1: Evaluation Metrics\n",
        "# Calculate and gather evaluation metrics here\n",
        "# Replace these with your actual evaluation metrics\n",
        "accuracy = 0.95\n",
        "recall = 0.90\n",
        "f1_score = 0.92\n",
        "iou = 0.80\n",
        "ap = 0.85\n",
        "map_score = 0.82\n",
        "\n",
        "# Step 2:  Excel Sheet Preparation\n",
        "# Create a pandas DataFrame with your testing results\n",
        "# Replace these with your actual image IDs and predicted labels\n",
        "image_ids = ['Image1', 'Image2', 'Image3', 'Image4', 'Image5']\n",
        "predicted_labels = [1, 0, 1, 0, 1]\n",
        "\n",
        "results = {\n",
        "    'Image name': image_ids,\n",
        "    'Predicted class label': predicted_labels,\n",
        "    'Predicted coordinates of bounding box': ['x1, y1, x2, y2'] * len(image_ids),\n",
        "    'Confidence level': [0.95] * len(image_ids)\n",
        "}\n",
        "\n",
        "df = pd.DataFrame(results)\n",
        "\n",
        "# Save the DataFrame to an Excel file\n",
        "df.to_excel('auto_wce_bleedgen_results.xlsx', index=False)"
      ],
      "metadata": {
        "id": "28eA4pqs1clB"
      },
      "execution_count": 30,
      "outputs": []
    },
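    {
      "cell_type": "code",
      "source": [
        "# Sketch (not part of the original notebook): compute real classification metrics\n",
        "# on the held-out validation split instead of the placeholder numbers above.\n",
        "# eval_generator is a new, unshuffled generator (not in the source) so that the\n",
        "# prediction order lines up with eval_generator.classes; scikit-learn is assumed\n",
        "# to be available in the Colab runtime.\n",
        "from sklearn.metrics import accuracy_score, recall_score, f1_score\n",
        "\n",
        "eval_generator = train_datagen.flow_from_directory(\n",
        "    train_data_dir,\n",
        "    target_size=(150, 150),\n",
        "    batch_size=32,\n",
        "    class_mode='binary',\n",
        "    subset='validation',\n",
        "    shuffle=False\n",
        ")\n",
        "\n",
        "eval_predictions = (model.predict(eval_generator).ravel() > 0.5).astype(int)\n",
        "print('Accuracy:', accuracy_score(eval_generator.classes, eval_predictions))\n",
        "print('Recall:  ', recall_score(eval_generator.classes, eval_predictions))\n",
        "print('F1 score:', f1_score(eval_generator.classes, eval_predictions))\n"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },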
    {
      "cell_type": "code",
      "source": [],
      "metadata": {
        "id": "z3Pgl-AX1cpK"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}