--- a
+++ b/AttentonUnet.ipynb
@@ -0,0 +1,236 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Error loading .DS_Store or 0655[0]_47.png: cannot identify image file <_io.BytesIO object at 0x35adee660>. Skipping...\n",
+      "Epoch 1/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m384s\u001b[0m 2s/step - accuracy: 0.9061 - loss: 0.2485 - val_accuracy: 0.8808 - val_loss: 0.3486\n",
+      "Epoch 2/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m384s\u001b[0m 2s/step - accuracy: 0.9415 - loss: 0.1394 - val_accuracy: 0.8412 - val_loss: 0.4048\n",
+      "Epoch 3/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m378s\u001b[0m 2s/step - accuracy: 0.9457 - loss: 0.1280 - val_accuracy: 0.8718 - val_loss: 0.4388\n",
+      "Epoch 4/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m385s\u001b[0m 2s/step - accuracy: 0.9491 - loss: 0.1193 - val_accuracy: 0.8620 - val_loss: 0.4341\n",
+      "Epoch 5/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m378s\u001b[0m 2s/step - accuracy: 0.9492 - loss: 0.1185 - val_accuracy: 0.8636 - val_loss: 0.5675\n",
+      "Epoch 6/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m384s\u001b[0m 2s/step - accuracy: 0.9515 - loss: 0.1134 - val_accuracy: 0.8706 - val_loss: 0.5460\n",
+      "Epoch 7/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m384s\u001b[0m 2s/step - accuracy: 0.9568 - loss: 0.0998 - val_accuracy: 0.8562 - val_loss: 0.6479\n",
+      "Epoch 8/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m382s\u001b[0m 2s/step - accuracy: 0.9572 - loss: 0.0983 - val_accuracy: 0.8637 - val_loss: 1.0583\n",
+      "Epoch 9/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m391s\u001b[0m 2s/step - accuracy: 0.9601 - loss: 0.0928 - val_accuracy: 0.8689 - val_loss: 0.4872\n",
+      "Epoch 10/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m385s\u001b[0m 2s/step - accuracy: 0.9616 - loss: 0.0885 - val_accuracy: 0.8676 - val_loss: 0.6407\n",
+      "Epoch 11/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m373s\u001b[0m 2s/step - accuracy: 0.9648 - loss: 0.0807 - val_accuracy: 0.8683 - val_loss: 0.6889\n",
+      "Epoch 12/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m377s\u001b[0m 2s/step - accuracy: 0.9663 - loss: 0.0786 - val_accuracy: 0.8550 - val_loss: 0.7435\n",
+      "Epoch 13/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m374s\u001b[0m 2s/step - accuracy: 0.9703 - loss: 0.0703 - val_accuracy: 0.8677 - val_loss: 0.6834\n",
+      "Epoch 14/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m373s\u001b[0m 2s/step - accuracy: 0.9712 - loss: 0.0665 - val_accuracy: 0.8694 - val_loss: 0.5149\n",
+      "Epoch 15/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m379s\u001b[0m 2s/step - accuracy: 0.9716 - loss: 0.0672 - val_accuracy: 0.8633 - val_loss: 0.7259\n",
+      "Epoch 16/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m395s\u001b[0m 2s/step - accuracy: 0.9748 - loss: 0.0594 - val_accuracy: 0.8736 - val_loss: 0.6896\n",
+      "Epoch 17/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m380s\u001b[0m 2s/step - accuracy: 0.9767 - loss: 0.0545 - val_accuracy: 0.8695 - val_loss: 0.7535\n",
+      "Epoch 18/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m389s\u001b[0m 2s/step - accuracy: 0.9773 - loss: 0.0532 - val_accuracy: 0.8664 - val_loss: 0.8831\n",
+      "Epoch 19/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m376s\u001b[0m 2s/step - accuracy: 0.9781 - loss: 0.0512 - val_accuracy: 0.8720 - val_loss: 0.7170\n",
+      "Epoch 20/20\n",
+      "\u001b[1m193/193\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m384s\u001b[0m 2s/step - accuracy: 0.9790 - loss: 0.0487 - val_accuracy: 0.8707 - val_loss: 0.6628\n"
+     ]
+    }
+   ],
+   "source": [
+    "import tensorflow as tf\n",
+    "from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Activation, BatchNormalization, Add, Multiply\n",
+    "from tensorflow.keras.models import Model\n",
+    "import os\n",
+    "import numpy as np\n",
+    "from tensorflow.keras.preprocessing.image import load_img, img_to_array\n",
+    "\n",
+    "def attention_block(x, g, inter_channel):\n",
+    "    \"\"\"\n",
+    "    Attention Block: Refines encoder features based on decoder signals.\n",
+    "    x: Input tensor from the encoder (skip connection)\n",
+    "    g: Gating signal from the decoder (upsampled tensor)\n",
+    "    inter_channel: Number of intermediate channels (reduces computation)\n",
+    "    \"\"\"\n",
+    "    # 1x1 Convolution on input tensor\n",
+    "    theta_x = Conv2D(inter_channel, kernel_size=(1, 1), strides=(1, 1), padding='same')(x)\n",
+    "    # 1x1 Convolution on gating tensor\n",
+    "    phi_g = Conv2D(inter_channel, kernel_size=(1, 1), strides=(1, 1), padding='same')(g)\n",
+    "    \n",
+    "    # Add the transformed inputs and apply ReLU\n",
+    "    add_xg = Add()([theta_x, phi_g])\n",
+    "    relu_xg = Activation('relu')(add_xg)\n",
+    "    \n",
+    "    # Another 1x1 Convolution to generate attention coefficients\n",
+    "    psi = Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='same')(relu_xg)\n",
+    "    # Sigmoid activation to normalize attention weights\n",
+    "    sigmoid_psi = Activation('sigmoid')(psi)\n",
+    "    \n",
+    "    # Multiply the input tensor with the attention weights\n",
+    "    return Multiply()([x, sigmoid_psi])\n",
+    "\n",
+    "def conv_block(x, filters):\n",
+    "    \"\"\"\n",
+    "    Convolutional Block: Apply two 3x3 convolutions followed by BatchNorm and ReLU.\n",
+    "    x: Input tensor\n",
+    "    filters: Number of output filters for the convolutions\n",
+    "    \"\"\"\n",
+    "    x = Conv2D(filters, kernel_size=(3, 3), padding='same')(x)\n",
+    "    x = BatchNormalization()(x)\n",
+    "    x = Activation('relu')(x)\n",
+    "    x = Conv2D(filters, kernel_size=(3, 3), padding='same')(x)\n",
+    "    x = BatchNormalization()(x)\n",
+    "    x = Activation('relu')(x)\n",
+    "    return x\n",
+    "\n",
+    "def attention_unet(input_shape, num_classes):\n",
+    "    \"\"\"\n",
+    "    Attention U-Net model architecture.\n",
+    "    input_shape: Shape of input images (H, W, C)\n",
+    "    num_classes: Number of output segmentation classes\n",
+    "    \"\"\"\n",
+    "    # Input layer for the images\n",
+    "    inputs = Input(input_shape)\n",
+    "    \n",
+    "    # Encoder (Downsampling path)\n",
+    "    c1 = conv_block(inputs, 64)              # First Conv Block\n",
+    "    p1 = MaxPooling2D((2, 2))(c1)            # Downsample by 2\n",
+    "    \n",
+    "    c2 = conv_block(p1, 128)                 # Second Conv Block\n",
+    "    p2 = MaxPooling2D((2, 2))(c2)            # Downsample by 2\n",
+    "    \n",
+    "    c3 = conv_block(p2, 256)                 # Third Conv Block\n",
+    "    p3 = MaxPooling2D((2, 2))(c3)            # Downsample by 2\n",
+    "    \n",
+    "    c4 = conv_block(p3, 512)                 # Fourth Conv Block\n",
+    "    p4 = MaxPooling2D((2, 2))(c4)            # Downsample by 2\n",
+    "    \n",
+    "    # Bottleneck (lowest level of the U-Net)\n",
+    "    c5 = conv_block(p4, 1024)\n",
+    "    \n",
+    "    # Decoder (Upsampling path)\n",
+    "    up6 = UpSampling2D((2, 2))(c5)           # Upsample\n",
+    "    att6 = attention_block(c4, up6, 512)     # Attention Block\n",
+    "    merge6 = concatenate([up6, att6], axis=-1)  # Concatenate features\n",
+    "    c6 = conv_block(merge6, 512)             # Conv Block after concatenation\n",
+    "    \n",
+    "    up7 = UpSampling2D((2, 2))(c6)\n",
+    "    att7 = attention_block(c3, up7, 256)\n",
+    "    merge7 = concatenate([up7, att7], axis=-1)\n",
+    "    c7 = conv_block(merge7, 256)\n",
+    "    \n",
+    "    up8 = UpSampling2D((2, 2))(c7)\n",
+    "    att8 = attention_block(c2, up8, 128)\n",
+    "    merge8 = concatenate([up8, att8], axis=-1)\n",
+    "    c8 = conv_block(merge8, 128)\n",
+    "    \n",
+    "    up9 = UpSampling2D((2, 2))(c8)\n",
+    "    att9 = attention_block(c1, up9, 64)\n",
+    "    merge9 = concatenate([up9, att9], axis=-1)\n",
+    "    c9 = conv_block(merge9, 64)\n",
+    "    \n",
+    "    # Output layer for segmentation\n",
+    "    outputs = Conv2D(num_classes, (1, 1), activation='softmax' if num_classes > 1 else 'sigmoid')(c9)\n",
+    "    \n",
+    "    # Define the model\n",
+    "    model = Model(inputs=inputs, outputs=outputs)\n",
+    "    return model\n",
+    "\n",
+    "# Function to load and preprocess images and masks\n",
+    "def load_data(image_dir, mask_dir, image_size):\n",
+    "    \"\"\"\n",
+    "    Load and preprocess images and masks for training.\n",
+    "    image_dir: Path to the directory containing input images\n",
+    "    mask_dir: Path to the directory containing segmentation masks\n",
+    "    image_size: Tuple specifying the size (height, width) to resize the images and masks\n",
+    "    \"\"\"\n",
+    "    images = []\n",
+    "    masks = []\n",
+    "    image_files = sorted(os.listdir(image_dir))\n",
+    "    mask_files = sorted(os.listdir(mask_dir))\n",
+    "    \n",
+    "    for img_file, mask_file in zip(image_files, mask_files):\n",
+    "        try:\n",
+    "            # Load and preprocess images\n",
+    "            img_path = os.path.join(image_dir, img_file)\n",
+    "            mask_path = os.path.join(mask_dir, mask_file)\n",
+    "            \n",
+    "            img = load_img(img_path, target_size=image_size)  # Resize image\n",
+    "            mask = load_img(mask_path, target_size=image_size, color_mode='grayscale')  # Resize mask\n",
+    "            \n",
+    "            # Convert to numpy arrays and normalize\n",
+    "            img = img_to_array(img) / 255.0\n",
+    "            mask = img_to_array(mask) / 255.0\n",
+    "            mask = np.round(mask)  # Ensure masks are binary\n",
+    "            \n",
+    "            images.append(img)\n",
+    "            masks.append(mask)\n",
+    "        except Exception as e:\n",
+    "            print(f\"Error loading {img_file} or {mask_file}: {e}. Skipping...\")\n",
+    "    \n",
+    "    return np.array(images), np.array(masks)\n",
+    "\n",
+    "# Example usage\n",
+    "if __name__ == \"__main__\":\n",
+    "    # Load data\n",
+    "    image_dir = \"./images/\"  # Replace with your image directory\n",
+    "    mask_dir = \"./masks/\"    # Replace with your mask directory\n",
+    "    image_size = (128, 128)       # Resize all images to 128x128\n",
+    "    images, masks = load_data(image_dir, mask_dir, image_size)\n",
+    "    \n",
+    "    # Define the model\n",
+    "    model = attention_unet(input_shape=(128, 128, 3), num_classes=1)\n",
+    "    \n",
+    "    # Compile the model\n",
+    "    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n",
+    "    \n",
+    "    # Train the model\n",
+    "    model.fit(images, masks, batch_size=8, epochs=20, validation_split=0.1)"
+   ]
+  },
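+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Hedged sketch (not part of the original notebook): a quick visual check of the trained model.\n",
+    "# Assumes `model`, `images`, and `masks` from the previous cell are still in memory;\n",
+    "# the 0.5 threshold on the sigmoid output is an illustrative choice.\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "n = min(3, len(images))                       # look at a few samples\n",
+    "preds = model.predict(images[:n])             # sigmoid probabilities in [0, 1]\n",
+    "pred_masks = (preds > 0.5).astype(\"float32\")  # binarize with the assumed threshold\n",
+    "\n",
+    "fig, axes = plt.subplots(n, 3, figsize=(9, 3 * n), squeeze=False)\n",
+    "for i in range(n):\n",
+    "    axes[i, 0].imshow(images[i])\n",
+    "    axes[i, 0].set_title(\"Image\")\n",
+    "    axes[i, 1].imshow(masks[i].squeeze(), cmap=\"gray\")\n",
+    "    axes[i, 1].set_title(\"Ground truth\")\n",
+    "    axes[i, 2].imshow(pred_masks[i].squeeze(), cmap=\"gray\")\n",
+    "    axes[i, 2].set_title(\"Prediction\")\n",
+    "    for ax in axes[i]:\n",
+    "        ax.axis(\"off\")\n",
+    "plt.tight_layout()\n",
+    "plt.show()"
+   ]
+  },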
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "base",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}