# occupy.py -- reserve GPU memory so other jobs cannot claim it
import os; import torch
def occumpy_mem(cuda_device):
    """Reserve most of a GPU's free memory by allocating one large tensor.

    Queries nvidia-smi for the device's total and used memory (MiB), then
    allocates a CUDA float32 tensor sized so the device reaches ~85% usage.
    The tensor is deleted immediately, but PyTorch's caching allocator keeps
    the memory reserved by this process, which is the whole point.

    Args:
        cuda_device: GPU index, as a string or int (e.g. '0').
    """

    def check_mem(device):
        """Return (total_mib, used_mib) for *device* as reported by nvidia-smi."""
        # NOTE(review): shells out through os.popen with a fixed command
        # string; acceptable for a trusted local tool, but
        # subprocess.run([...], shell=False) would be the safer idiom.
        devices_info = os.popen(
            '"/usr/bin/nvidia-smi" --query-gpu=memory.total,memory.used'
            ' --format=csv,nounits,noheader'
        ).read().strip().split("\n")
        # One CSV line per GPU: "<total>, <used>"; int() tolerates the space.
        total, used = devices_info[int(device)].split(',')
        return int(total), int(used)

    total, used = check_mem(cuda_device)
    max_mem = int(total * 0.85)   # target occupancy: 85% of the card
    block_mem = max_mem - used    # MiB still to claim
    if block_mem <= 0:
        # Device is already at or above the target; a non-positive tensor
        # dimension would raise, so there is nothing to do.
        return
    # 256 * 1024 float32 elements == exactly 1 MiB, so this tensor
    # occupies block_mem MiB on the device.
    x = torch.FloatTensor(256, 1024, block_mem).to(
        torch.device(f"cuda:{cuda_device}")
    )
    # Freed back to torch's caching allocator -- the memory remains
    # reserved by this process rather than returned to the driver.
    del x
if __name__ == "__main__":
    # Only grab memory when run as a script, not when imported as a module.
    for device_id in ("0", "1"):
        occumpy_mem(device_id)