#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import pysam
import numpy as np
import os
import pickle
import gzip
import pandas as pd
import multiprocessing
from singlecellmultiomics.bamProcessing import get_contig_sizes, get_contig_size, get_contigs, get_samples_from_bam
from statsmodels.nonparametric.smoothers_lowess import lowess
from datetime import datetime
from itertools import chain
from more_itertools import windowed
from typing import Generator
from singlecellmultiomics.methylation import MethylationCountMatrix
from pysamiterators import CachedFasta
from pysam import FastaFile
from singlecellmultiomics.utils import reverse_complement, is_main_chromosome, pool_wrapper
from collections import defaultdict, Counter
from itertools import product
from singlecellmultiomics.bamProcessing.bamFunctions import mate_iter
from multiprocessing import Pool
from singlecellmultiomics.pyutils import sorted_slice


def _generate_count_dict(args):
    """
    Obtain cut counts for a bam path, given a bin size and region

    Args:
        args (tuple) : bam_path, bin_size, contig, start, stop, filter_function
    """
    bam_path, bin_size, contig, start, stop, filter_function = args

    cut_counts = defaultdict(Counter)
    with pysam.AlignmentFile(bam_path) as alignments:
        for R1, R2 in mate_iter(alignments, contig=contig, start=start, stop=stop):
            if filter_function is None:
                if R1 is None or R1.is_duplicate or R1.is_qcfail:
                    continue
            else:
                if not filter_function(R1, R2):
                    continue

            if not R1.has_tag('DS'):
                cut_pos = R1.reference_end if R1.is_reverse else R1.reference_start
            else:
                cut_pos = int(R1.get_tag('DS'))

            if cut_pos is None:
                continue
            if (start is not None and cut_pos < start) or (stop is not None and cut_pos > stop):
                continue

            if R1.has_tag('SM'):
                sample = R1.get_tag('SM')
            else:
                sample = 'No_Sample'

            bin_idx = int(cut_pos / bin_size) * bin_size
            cut_counts[(contig, bin_idx)][sample] += 1
    return cut_counts, contig, bam_path


# cell -> context -> obs
def _generate_count_dict_prefixed(args):
    """
    Same as _generate_count_dict, but restricted to reads carrying a DS tag;
    counts can be aggregated under an alias and samples can be prefixed.
    """
    bam_path, bin_size, contig, start, stop, filter_function, alias, prefix = args

    cut_counts = defaultdict(Counter)
    with pysam.AlignmentFile(bam_path) as alignments:
        for R1, R2 in mate_iter(alignments, contig=contig, start=start, stop=stop):
            if filter_function is None:
                if R1 is None or R1.is_duplicate or not R1.has_tag('DS') or R1.is_qcfail:
                    continue
            else:
                if not filter_function(R1, R2):
                    continue

            cut_pos = R1.get_tag('DS')
            sample = R1.get_tag('SM')
            if (start is not None and cut_pos < start) or (stop is not None and cut_pos > stop):
                continue

            if prefix is not None:
                sample = (prefix, sample)

            if alias is not None:
                cut_counts[alias][sample] += 1
            else:
                bin_idx = int(cut_pos / bin_size) * bin_size
                cut_counts[(contig, bin_idx)][sample] += 1
    return cut_counts, contig, bam_path


# Get TA fraction per cell
def _generate_ta_count_dict_prefixed(args):
    bam_path, bin_size, contig, start, stop, filter_function, alias, prefix = args

    cut_counts = defaultdict(Counter)
    with pysam.AlignmentFile(bam_path) as alignments:
        for R1, R2 in mate_iter(alignments, contig=contig, start=start, stop=stop):
            if R1 is None or R1.is_duplicate or not R1.has_tag('DS') or R1.is_qcfail:
                continue
            if R1.get_tag('lh') != 'TA':
                continue
            if filter_function is not None and not filter_function(R1, R2):
                continue

            cut_pos = R1.get_tag('DS')
            sample = R1.get_tag('SM')
            if prefix is not None:
                sample = (prefix, sample)

            if alias is not None:
                cut_counts[alias][sample] += 1
            else:
                bin_idx = int(cut_pos / bin_size) * bin_size
                cut_counts[(contig, bin_idx)][sample] += 1
    return cut_counts, contig, bam_path


def _get_contig_cuts(bam: str, contig: str, mqthreshold=50):
    """
    Obtain np.array of all cuts for the supplied contig and bam file for each sample individually;
    the cut arrays are sorted by genomic coordinate

    Returns:
        contig, {cell: np.array([cut1(int), cut2(int)])}
    """
    single_cell_cuts = defaultdict(list)
    with pysam.AlignmentFile(bam) as a:
        for read in a.fetch(contig):
            if read.is_read1 and not read.is_qcfail and not read.is_duplicate and read.mapping_quality > mqthreshold:
                single_cell_cuts[read.get_tag('SM')].append(read.get_tag('DS'))
    return contig, {
        cell: np.array(sorted(cuts))
        for cell, cuts in single_cell_cuts.items()
    }


def bam_to_single_sample_cuts(bams: list, n_processes=None, contigs=None, mqthreshold=50):
    """
    Obtain np.array of all cuts for the supplied contigs and bam files for each sample individually;
    the cut arrays are sorted by genomic coordinate.
    When contigs is not supplied, cuts are determined for all contigs except random scaffolds and alternative alleles.

    Returns:
        cell_cuts: {cell: {contig: np.array([cut1(int), cut2(int)])}},
        total_cuts_per_cell: {cell: total_cuts (int)}
    """
    cell_cuts = {}  # cell -> contig -> cuts
    total_cuts_per_cell = Counter()
    with Pool(n_processes) as workers:
        if contigs is None:
            contigs = [c for c in get_contigs(bams[0]) if is_main_chromosome(c)]
        for contig, cuts in workers.imap(pool_wrapper, ((
                _get_contig_cuts, {'contig': contig,
                                   'bam': bam,
                                   'mqthreshold': mqthreshold})
                for contig, bam in product(contigs, bams))):
            for cell, cc in cuts.items():
                if not cell in cell_cuts:
                    cell_cuts[cell] = {}
                cell_cuts[cell][contig] = cc
                total_cuts_per_cell[cell] += len(cc)
    return cell_cuts, total_cuts_per_cell
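
# Usage sketch (the bam path and cell name are hypothetical):
#   cell_cuts, total_cuts_per_cell = bam_to_single_sample_cuts(
#       ['./tagged.bam'], n_processes=4, mqthreshold=50)
#   cell_cuts['cellA']['chr1']  # -> sorted np.array of cut coordinates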


def get_binned_counts_prefixed(bam_dict, bin_size, regions=None, ta=False, filter_function=None, n_threads=None) -> pd.DataFrame:
    """
    Same as get_binned_counts but accepts a bam_dict, {'alias': [bam file, bam file], 'alias2': [bam file, ]}
    The alias is added as the first level of the output dataframe multi-index

    Returns:
        pd.DataFrame
    """
    fs = 1000  # flank: widen the fetch window so fragments near the region edge are included
    if regions is None:
        regions = [(c, None, None) for c in get_contig_sizes(list(bam_dict.values())[0][0]).keys()]
    else:
        for i, r in enumerate(regions):
            if type(r) == str:
                regions[i] = (r, None, None)
            elif len(r) == 3:
                contig, start, end = r
                if type(start) == int:
                    start = max(0, start - fs)
                regions[i] = (contig, start, end)
            else:
                contig, start, end, alias = r
                if type(start) == int:
                    start = max(0, start - fs)
                regions[i] = (contig, start, end, alias)

    jobs = []
    for bam_group, bam_list in bam_dict.items():
        for region in regions:
            for bam_path in bam_list:
                if len(region) == 4:
                    # Aliased regions: pass the alias in the slot the worker expects
                    contig, start, end, alias = region
                    jobs.append((bam_path, bin_size, contig, start, end, filter_function, alias, bam_group))
                else:
                    jobs.append((bam_path, bin_size, *region, filter_function, None, bam_group))

    cut_counts = defaultdict(Counter)
    with Pool(n_threads) as workers:
        for i, (cc, contig, bam_path) in enumerate(workers.imap(
                _generate_count_dict_prefixed if not ta else _generate_ta_count_dict_prefixed, jobs)):
            for k, v in cc.items():
                cut_counts[k] += v
            print(i, '/', len(jobs), end='\r')
    return pd.DataFrame(cut_counts).T
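
# Usage sketch (group names and bam paths are hypothetical): counts for two
# groups of bam files end up in one dataframe keyed by (group, sample):
#   df = get_binned_counts_prefixed({'wt': ['wt.bam'], 'ko': ['ko.bam']},
#                                   bin_size=100_000, n_threads=4)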


def get_binned_counts(bams, bin_size, regions=None, filter_function=None, n_threads=None):
    fs = 1000  # flank: widen the fetch window so fragments near the region edge are included
    if regions is None:
        regions = [(c, None, None) for c in get_contig_sizes(bams[0]).keys()]
    else:
        for i, r in enumerate(regions):
            if type(r) == str:
                regions[i] = (r, None, None)
            else:
                contig, start, end = r
                if type(start) == int:
                    start = max(0, start - fs)
                regions[i] = (contig, start, end)

    jobs = [(bam_path, bin_size, *region, filter_function) for region, bam_path in product(regions, bams)]
    cut_counts = defaultdict(Counter)
    with multiprocessing.Pool(n_threads) as workers:
        for i, (cc, contig, bam_path) in enumerate(workers.imap(_generate_count_dict, jobs)):
            for k, v in cc.items():
                cut_counts[k] += v
            print(i, '/', len(jobs), end='\r')
    return pd.DataFrame(cut_counts).T
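
# Usage sketch (path and regions are hypothetical): 500 kb cut counts for a
# region of chr1 plus the whole of chr2:
#   df = get_binned_counts(['./tagged.bam'], bin_size=500_000,
#                          regions=[('chr1', 0, 10_000_000), 'chr2'])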


def fill_range(start, end, step):
    """
    Range iterator from start to end with step size `step`,
    generating a final step which steps exactly to end

    Args:
        start (int) : start
        end (int)
        step (int) : step

    Example:
        >>> list(fill_range(0, 10, 4))
        [(0, 4), (4, 8), (8, 10)]
    """
    e = start
    for s in range(start, end, step):
        e = s + step
        if e > end:
            e = e - step
            break
        yield s, e
    if e < end:
        yield e, end


def trim_rangelist(rangelist, start, end):
    """
    Trim a list of (start, end) coordinates to only keep ranges within start and end,
    and trim ranges which overlap with start/end.

    Args:
        rangelist(list) : list of tuples (start, end)
        start(int) : inclusive start coordinate
        end(int) : exclusive end coordinate

    Yields:
        (range_start, range_end)
    """
    for s, e in rangelist:
        overlap = False
        if s >= start and s < end:
            overlap = True
        if e >= start and e < end:
            overlap = True
        if not overlap:
            continue
        yield max(s, start), min(e, end)
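
# Example (illustrative values): ranges are clipped to the [50, 200) window:
#   >>> list(trim_rangelist([(0, 100), (150, 250)], 50, 200))
#   [(50, 100), (150, 200)]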


def get_bins_from_bed_iter(path, contig=None):
    # Open gzipped bed files in text mode so both branches yield str lines
    with (gzip.open(path, 'rt') if path.endswith('.gz') else open(path)) as f:
        for line in f:
            c, start, end = line.strip().split(None, 3)[:3]
            start, end = int(start), int(end)
            if contig is None or c == contig:
                yield c, start, end


def get_bins_from_bed_dict(path, contig=None):
    bd = {}
    for c, start, end in get_bins_from_bed_iter(path, contig=contig):
        if not c in bd:
            bd[c] = list()
        bd[c].append((start, end))
    return bd


def range_contains_overlap(clist):
    """
    Check if the supplied iterator of (start, end) coordinates contains an overlap

    Args:
        clist (list) : sorted list of start, end coordinates of regions
    """
    clist = sorted(clist)
    if len(clist) < 2:
        return False
    for (start, end), (next_start, next_end) in windowed(clist, 2):
        if start > next_start or end > next_start or end > next_end or start > next_end:
            return True
    return False


def _merge_overlapping_ranges(clist):
    merged = False
    for (start, end), (next_start, next_end) in windowed(clist, 2):
        if merged:
            merged = False
            continue
        if start > next_start or end > next_start or end > next_end or start > next_end:
            yield (min(start, next_start), max(next_end, end))
            merged = True
        else:
            yield start, end
    if not merged:
        yield clist[-1]


def merge_overlapping_ranges(clist):
    clist = sorted(clist)
    while range_contains_overlap(clist):
        clist = sorted(list(_merge_overlapping_ranges(clist)))
    return clist
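
# Example (illustrative values): overlapping ranges collapse into one:
#   >>> merge_overlapping_ranges([(0, 10), (5, 20), (30, 40)])
#   [(0, 20), (30, 40)]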


def blacklisted_binning_contigs(contig_length_resource: str, bin_size: int, fragment_size: int,
                                blacklist_path: str = None, contig_whitelist: list = None) -> Generator:
    """
    Generate (contig, bin_start, bin_end) tuples of size bin_size or smaller, or, when fragment_size is supplied,
    (contig, bin_start, bin_end, fetch_start, fetch_end). Regions present in the blacklist BED file will not be
    part of the generated bins.

    Args:
        contig_length_resource(str): Path to bam file from which to extract the contig lengths
        bin_size(int) : maximum size of generated bins (might produce some bins which are smaller)
        fragment_size(int) : When this value is supplied, fetch_start and fetch_end will be produced, equal to
                             bin_start - fragment_size and bin_end + fragment_size, but never overlapping with
                             blacklisted regions or exceeding contig boundaries.
        blacklist_path(str): path to blacklist bed file
        contig_whitelist(iterable): A set of contigs to include in the result. All contigs are included when
                                    contig_whitelist is not specified.

    Yields:
        (contig, bin_start, bin_end),
        or (contig, bin_start, bin_end, fetch_start, fetch_end) when fragment_size is specified
    """
    if blacklist_path is not None:
        blacklist_dict = get_bins_from_bed_dict(blacklist_path)
    else:
        blacklist_dict = {}

    for contig, length in (get_contig_sizes(contig_length_resource).items()
                           if type(contig_length_resource) is str else contig_length_resource):
        if contig_whitelist is not None and not contig in contig_whitelist:
            continue

        if fragment_size is not None:
            for bin_start, bin_end, fetch_start, fetch_end in \
                    blacklisted_binning(
                        start_coord=0,
                        end_coord=length,
                        bin_size=bin_size,
                        blacklist=sorted(blacklist_dict.get(contig, [])),
                        fragment_size=fragment_size):
                yield contig, bin_start, bin_end, fetch_start, fetch_end
        else:
            for bin_start, bin_end in \
                    blacklisted_binning(
                        start_coord=0,
                        end_coord=length,
                        bin_size=bin_size,
                        blacklist=sorted(blacklist_dict.get(contig, []))):
                yield contig, bin_start, bin_end


def invert_ranges(ranges: list, max_coord: int, min_coord=0):
    """
    Invert genomic ranges: given a range list [[start, end>, ...], generate the inverted
    ranges starting from min_coord which do not overlap with the supplied ranges.
    Make sure there are no overlaps by running merge_overlapping_ranges before applying this function.
    """
    current = min_coord
    # fetch until the first range start pos:
    for b_start, b_end in ranges:
        if b_start != current:
            assert b_start > current, 'Run merge_overlapping_ranges before using this function'
            yield current, b_start
        current = b_end
    if b_end < max_coord:
        yield b_end, max_coord
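
# Example (illustrative values): the gaps between the ranges are returned:
#   >>> list(invert_ranges([(10, 20), (30, 40)], max_coord=50))
#   [(0, 10), (20, 30), (40, 50)]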


def blacklisted_binning(start_coord: int, end_coord: int, bin_size: int, blacklist: list = None,
                        fragment_size: int = None):
    """
    Obtain a list of regions to fetch between start_coord and end_coord with the given bin_size,
    excluding the regions in the blacklist. Optimizes the bin size, and allows a fragment_size parameter
    to be set which expands the bins with fragment_size without overlapping blacklist regions or the
    start_coord, end_coord boundaries.

    Args:
        start_coord(int)
        end_coord(int)
        bin_size(int)
        blacklist(list) : format (start, end), (start, end)
        fragment_size(int)

    Yields:
        bin_start, bin_end when fragment_size is None; bin_start, bin_end, fetch_start, fetch_end otherwise
    """
    if blacklist is None:
        blacklist = []
    elif len(blacklist) > 1:
        blacklist = merge_overlapping_ranges(blacklist)

    current = start_coord
    for i, (start, end) in enumerate(chain(
            trim_rangelist(blacklist, start_coord, end_coord),
            [(end_coord, end_coord + 1), ])):
        # Fill from current to start:
        if start == current:
            current = end
            continue

        total_bins = len(list(fill_range(current, start, bin_size)))
        if total_bins < 0:
            continue
        if total_bins == 0:
            total_bins = 1
        local_bin_size = int((start - current) / total_bins)

        for fi, (pos_s, pos_e) in enumerate(fill_range(current, start, local_bin_size)):
            if fragment_size is None:
                yield pos_s, pos_e
            else:
                fs = pos_s - fragment_size
                fe = pos_e + fragment_size
                if fi == 0:
                    # Left clip, no available bins on the left
                    fs = pos_s
                if fi == total_bins - 1:
                    fe = pos_e
                yield pos_s, pos_e, fs, fe
            current = pos_e
        current = end
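
# Example (illustrative values): bin 0-100 into ~30 bp bins around a
# blacklisted region; bin sizes are rebalanced per uncovered stretch:
#   >>> list(blacklisted_binning(0, 100, 30, blacklist=[(40, 50)]))
#   [(0, 20), (20, 40), (50, 75), (75, 100)]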


def obtain_approximate_reference_cut_position(site: int, contig: str, alt_spans: dict) -> tuple:
    alt_contig, alt_start, alt_end = alt_spans[contig]
    return contig, site + alt_start


def obtain_counts(commands, reference, live_update=True, show_n_cells=4, update_interval=3, threads=4,
                  count_function=None, show_progress=False):
    if count_function is None:
        count_function = count_fragments_binned
    if live_update:
        from singlecellmultiomics.utils.plotting import GenomicPlot
        import matplotlib.pyplot as plt
        cell_plots = {}
        for cell_index in range(show_n_cells):
            gplot = GenomicPlot(reference)
            fig = gplot.get_figure()
            fig.canvas.draw()
            cell_plots[cell_index] = {'plot': gplot, 'fig': fig}
            plt.pause(0.01)

    counts = {}
    prev = None
    top_cells = None
    start_time = datetime.now()
    update_method = 'partial_df'

    if show_progress:
        commands = list(commands)  # generator -> list
        print(f"Counting started ({len(commands)})", end='\r')

    with multiprocessing.Pool(threads) as workers:
        for i, result in enumerate(workers.imap_unordered(count_function, commands)):
            if show_progress:
                print(f"Counting progress {((i / len(commands)) * 100):.2f} % ({i} / {len(commands)})    ", end='\r')

            # Result is of the format: counts[bin_id][sample] = obs (int)
            for bin_id, sample_dict in result.items():
                if not bin_id in counts:
                    counts[bin_id] = sample_dict
                else:
                    counts[bin_id].update(sample_dict)

            if live_update and update_method == 'partial_df':
                if (datetime.now() - start_time).total_seconds() > 2 and (
                        prev is None or (datetime.now() - prev).total_seconds() >= update_interval):
                    if len(result) == 0:
                        continue
                    df = pd.DataFrame(counts).T
                    if df.sum().sum() == 0:
                        continue
                    prev = datetime.now()
                    if top_cells is None:
                        top_cells = df.sum().sort_values()[-show_n_cells:]
                    df = df[top_cells.index].fillna(0)
                    # Normalize to the 99th percentile and clip to the range [0, 2]:
                    df = np.clip(df / np.percentile(df, 99, axis=0), 0, 2)
                    for contig in [list(result.keys())[0][0]]:
                        x = np.array([(stop + start) / 2 for start, stop in df.loc[contig].index.values])
                        for cell_index, (cell, row) in enumerate(df.loc[contig].T.iterrows()):
                            gplot = cell_plots[cell_index]['plot']
                            gplot.reset_axis(contig)
                            fig = cell_plots[cell_index]['fig']
                            fig.suptitle(cell)
                            gplot[contig].scatter(x, row.values, s=0.1, c='k')
                            fig.canvas.draw()
                            plt.pause(0.001)

    if show_progress:
        print("Counting finished      ")

    # Show final result
    if live_update:
        df = pd.DataFrame(counts).T
        df = df[top_cells.index].fillna(0)
        df = np.clip(df / np.percentile(df, 99, axis=0), 0, 2)
        for contig in cell_plots[0]['plot'].contigs:
            x = np.array([(stop + start) / 2 for start, stop in df.loc[contig].index.values])
            for cell_index, (cell, row) in enumerate(df.loc[contig].T.iterrows()):
                gplot = cell_plots[cell_index]['plot']
                gplot.reset_axis(contig)
                fig = cell_plots[cell_index]['fig']
                fig.suptitle(cell)
                gplot[contig].scatter(x, row.values, s=0.1, c='k')
                fig.canvas.draw()
                plt.pause(0.001)
    return counts


def read_counts(read, min_mq, dedup=True, read1_only=False, ignore_mp=False, ignore_qcfail=False, verbose=False):
    if read is None:
        return False
    if read1_only and not read.is_read1:
        if verbose:
            print('NOT READ1')
        return False
    if read.is_qcfail and not ignore_qcfail:
        if verbose:
            print('QCFAIL')
        return False
    if dedup and read.is_duplicate:
        if verbose:
            print('DUPLICATE')
        return False
    if not ignore_mp and read.has_tag('mp') and read.get_tag('mp') != 'unique':
        if verbose:
            print('MP')
        return False
    if min_mq is not None and read.mapping_quality < min_mq:
        if verbose:
            print('MAPQ')
        return False
    if verbose:
        print('OK')
    return True
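
# read_counts doubles as the default read filter for the binned counters
# below; with verbose=True it prints why a pysam.AlignedSegment is rejected:
#   read_counts(read, min_mq=30, dedup=True, verbose=True)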


def gc_correct(args):
    observations, gc_vector, MAXCP = args
    # Fit a lowess curve of observations vs GC content and divide it out,
    # clipping the corrected values to [0, MAXCP]
    correction = lowess(observations, gc_vector)
    return np.clip(observations / np.interp(gc_vector, correction[:, 0], correction[:, 1]), 0, MAXCP)


def genomic_bins_to_gc(reference, df):
    bins_to_gc = {}
    for k in df.columns:
        if len(k) == 4:
            (allele, contig, start, end) = k
        else:
            (contig, start, end) = k
        if not k in bins_to_gc:
            sequence = reference.fetch(contig, start, end).upper()
            gc = sequence.count('G') + sequence.count('C')
            div = sequence.count('A') + sequence.count('T') + gc
            if div == 0:
                # There is no sequence data; fill in the mean GC later
                bins_to_gc[k] = np.nan
            else:
                gcrat = gc / div
                bins_to_gc[k] = gcrat
    return bins_to_gc


def gc_correct_cn_frame(df, reference, MAXCP, threads, norm_method='median'):
    # Perform GC correction
    chrom_sizes = dict(zip(reference.references, reference.lengths))

    # Extract GC percentage from the reference for the selected bin size:
    bins_to_gc = genomic_bins_to_gc(reference, df)
    qf = pd.DataFrame({'gc': bins_to_gc})
    qf = qf.fillna(qf['gc'].mean())

    # Join the GC table with the count matrix
    gc_matched = df.T.join(qf, how='left')['gc']

    # This performs GC correction for every cell using lowess regression
    with multiprocessing.Pool(threads) as workers:
        keep_bins = df.columns
        gc_vector = gc_matched[keep_bins]
        corrected_cells = list(workers.imap(
            gc_correct, [(row, gc_vector.values, MAXCP) for cell, row in df.iterrows()]))
    corrected_cells = pd.concat(corrected_cells, axis=1).T

    if norm_method == 'median':
        corrected_cells = ((corrected_cells.T / corrected_cells.median(1)) * 2).T
    elif norm_method == 'mean':
        corrected_cells = ((corrected_cells.T / corrected_cells.mean(1)) * 2).T
    else:
        raise ValueError('norm_method not understood')

    return corrected_cells
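
# Usage sketch (assumes `df` is a raw bin-count dataframe with
# (contig, start, end) columns, as produced by the counters in this module):
#   with pysam.FastaFile('./reference.fa') as fa:
#       corrected = gc_correct_cn_frame(df, fa, MAXCP=4, threads=8)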


def generate_jobs(alignments_path, bin_size=1_000_000, bins_per_job=10):
    for job_group in (((contig, start, start + bin_size * bins_per_job)
                       for start in range(0, length, bin_size * bins_per_job))
                      for contig, length in get_contig_sizes(alignments_path).items()):
        yield from job_group


def generate_commands(alignments_path,
                      bin_size=1_000_000,
                      bins_per_job=10,
                      alt_spans=None,
                      min_mq=50,
                      max_fragment_size=1000,
                      head=None,
                      key_tags=None,
                      dedup=True,
                      kwargs=None,
                      skip_contigs=None
                      ):
    if type(alignments_path) is list:
        iterfiles = alignments_path
    else:
        iterfiles = [alignments_path]

    for alignments_path in iterfiles:
        for i, (contig, start, end) in enumerate(
                generate_jobs(alignments_path=alignments_path, bin_size=bin_size, bins_per_job=bins_per_job)):
            if skip_contigs is not None and contig in skip_contigs:
                continue
            yield (alignments_path, bin_size, max_fragment_size,
                   contig, start, end,
                   min_mq, alt_spans, key_tags, dedup, kwargs)
            if head is not None and i >= (head - 1):
                break
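
# Usage sketch (path is hypothetical): generate_commands produces the job
# tuples consumed by the count_* workers below, e.g. via obtain_counts:
#   commands = generate_commands('./tagged.bam', bin_size=250_000,
#                                bins_per_job=5, kwargs={})
#   counts = obtain_counts(commands, reference=None, live_update=False,
#                          threads=4)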


def count_methylation_binned(args):
    (alignments_path, bin_size, max_fragment_size,
     contig, start, end,
     min_mq, alt_spans, key_tags, dedup, kwargs) = args
    if kwargs is None:
        kwargs = {}

    # single_location => count single CpGs
    single_location = kwargs.get('single_location', False)
    dyad_mode = kwargs.get('dyad_mode', False)
    # Stranded mode:
    stranded = kwargs.get('stranded', False)
    contexts_to_capture = kwargs.get('contexts_to_capture', None)  # List
    count_reads = kwargs.get('count_reads', False)

    reference = None
    if contexts_to_capture is not None:
        context_radius = kwargs.get('context_radius', None)
        reference_path = kwargs.get('reference_path', None)
        if reference_path is not None:
            reference = CachedFasta(FastaFile(reference_path))

    default_sample_name = kwargs.get('default_sample_name', 'bulk_sample')
    min_counts_per_bin = kwargs.get('min_counts_per_bin', 10)  # Min measurements across all cells

    # Can't use defaultdict because of pickles:
    # Sample -> (contig, bin_start, bin_end) -> [methylated_counts, unmethylated]
    met_counts = MethylationCountMatrix(threads=kwargs.get('threads', None))
    if count_reads:
        read_count_dict = defaultdict(Counter)  # location -> sample -> read_obs

    # Define which positions we want to skip:
    known = set()
    if 'known' in kwargs and kwargs['known'] is not None:
        # Only ban the very specific TAPS conversions:
        try:
            with pysam.VariantFile(kwargs['known']) as variants:
                for record in variants.fetch(contig, start, end):
                    if record.ref == 'C' and 'T' in record.alts:
                        known.add(record.pos)
                    if record.ref == 'G' and 'A' in record.alts:
                        known.add(record.pos)
        except ValueError:
            # This happens on contigs not present in the vcf
            pass

    start_time = datetime.now()
    with pysam.AlignmentFile(alignments_path, threads=4) as alignments:
        # Obtain size of selected contig:
        contig_size = get_contig_size(alignments, contig)
        if contig_size is None:
            raise ValueError('Unknown contig')

        # Determine where we start looking for fragments:
        f_start = max(0, start - max_fragment_size)
        f_end = min(end + max_fragment_size, contig_size)

        for p, read in enumerate(alignments.fetch(contig=contig, start=f_start, stop=f_end)):
            if p % 50 == 0 and 'maxtime' in kwargs and kwargs['maxtime'] is not None:
                if (datetime.now() - start_time).total_seconds() > kwargs['maxtime']:
                    print(f'Gave up on {contig}:{start}-{end}')
                    break

            if not read_counts(read, min_mq=min_mq, dedup=dedup, verbose=False):
                continue

            tags = dict(read.tags)
            sample = tags.get('SM', default_sample_name)

            if count_reads and (read.is_read1 or not read.is_paired):
                site = tags.get('DS', (read.reference_end if read.is_reverse else read.reference_start))
                # Obtain the bin index
                if single_location:
                    bin_start = site
                    bin_end = site + 1
                else:
                    bin_i = int(site / bin_size)
                    bin_start = bin_size * bin_i
                    bin_end = min(bin_size * (bin_i + 1), contig_size)
                read_count_dict[(read.reference_name, bin_start, bin_end)][sample] += 1

            for i, (qpos, site) in enumerate(read.get_aligned_pairs(matches_only=True)):
                # Don't count sites outside the selected bounds
                if site < start or site >= end:
                    continue
                if site in known:
                    continue

                call = tags['XM'][i]
                final_call = None
                if contexts_to_capture is not None:
                    if call == '.':
                        continue
                    try:
                        context = reference.fetch(read.reference_name,
                                                  site - context_radius,
                                                  site + context_radius + 1).upper()
                    except KeyError:
                        raise KeyError(
                            f'The supplied reference file does not match the reads. Contig "{read.reference_name}" missing from supplied reference.')
                    if len(context) != (context_radius * 2 + 1):
                        continue
                    if context[context_radius] == 'G':
                        context = reverse_complement(context)

                    if context in contexts_to_capture:
                        final_call = call.isupper()
                    else:
                        continue

                    if context[context_radius] not in 'CG':
                        # This is bad: the position is not a C or G. It might be another IUPAC nucleotide code.
                        # Ditch this READ if the ref base is A/T:
                        if context[context_radius] in 'AT':
                            print(
                                f'For read {read.reference_name}:{read.cigarstring} at {site}, query pos {qpos} a {context[context_radius]} was found in the reference. Ignoring this read')
                            break
                        # Ditch this BASE if it is an IUPAC nucleotide code:
                        continue

                elif call in 'Zz':
                    final_call = call == 'Z'
                    # CpG
                    # ^
                    if read.is_reverse and dyad_mode:
                        # GpC
                        #  ^
                        site += 1
                else:
                    continue

                # Process alternative contig counts:
                if alt_spans is not None and contig in alt_spans:
                    contig, site = obtain_approximate_reference_cut_position(site, contig, alt_spans)

                # Obtain the bin index
                if single_location:
                    bin_start = site
                    bin_end = site + 1
                else:
                    bin_i = int(site / bin_size)
                    bin_start = bin_size * bin_i
                    bin_end = min(bin_size * (bin_i + 1), contig_size)

                # Add additional tag information (for example the allele tag):
                if key_tags is not None:
                    tag_values = [(read.get_tag(tag) if read.has_tag(tag) else None) for tag in key_tags]
                    if stranded:
                        bin_id = (*tag_values, contig, bin_start, bin_end, '+-'[read.is_reverse])
                    else:
                        bin_id = (*tag_values, contig, bin_start, bin_end)
                else:
                    if stranded:
                        bin_id = (contig, bin_start, bin_end, '+-'[read.is_reverse])
                    else:
                        bin_id = (contig, bin_start, bin_end)

                if final_call is not None:
                    met_counts[sample, bin_id][final_call] += 1

    met_counts.prune(min_samples=kwargs.get('min_samples', 0), min_variance=kwargs.get('min_variance', 0))
    if reference is not None:
        reference.handle.close()
    return met_counts, (read_count_dict if count_reads else None)
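
# Usage sketch (path is hypothetical): one job tuple, in the same order as
# produced by generate_commands, counted in CpG (Zz) mode:
#   job = ('./tagged.bam', 100_000, 1000, 'chr1', 0, 10_000_000,
#          30, None, None, True, {'single_location': False})
#   met_counts, _ = count_methylation_binned(job)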


def count_fragments_binned(args):
    (alignments_path, bin_size, max_fragment_size,
     contig, start, end,
     min_mq, alt_spans, key_tags, dedup, kwargs) = args
    if kwargs is None:
        kwargs = {}

    counts = {}  # bin_id -> sample -> counts
    ignore_mp = kwargs.get('ignore_mp', False)
    with pysam.AlignmentFile(alignments_path, threads=4) as alignments:
        # Obtain size of selected contig:
        contig_size = get_contig_size(alignments, contig)
        if contig_size is None:
            raise ValueError('Unknown contig')

        # Determine where we start looking for fragments:
        f_start = max(0, start - max_fragment_size)
        f_end = min(end + max_fragment_size, contig_size)

        for read in alignments.fetch(contig=contig, start=f_start, stop=f_end):
            if not read_counts(read, min_mq=min_mq, dedup=dedup, read1_only=True, ignore_mp=ignore_mp):
                continue

            # Extract the cut site
            try:
                site = int(read.get_tag('DS'))
            except KeyError:
                if read.reference_start is not None:
                    site = read.reference_start
                elif read.reference_end is not None:
                    site = read.reference_end
                else:
                    continue

            # Don't count sites outside the selected bounds
            if site < start or site >= end:
                continue

            try:
                sample = read.get_tag('SM')
            except KeyError:
                sample = 'bulk'

            # Process alternative contig counts:
            if alt_spans is not None and contig in alt_spans:
                contig, site = obtain_approximate_reference_cut_position(site, contig, alt_spans)

            # Obtain the bin index
            bin_i = int(site / bin_size)
            bin_start = bin_size * bin_i
            bin_end = min(bin_size * (bin_i + 1), contig_size)

            # Add additional tag information (for example the allele tag):
            if key_tags is not None:
                tag_values = [(read.get_tag(tag) if read.has_tag(tag) else None) for tag in key_tags]
                bin_id = (*tag_values, contig, bin_start, bin_end)
            else:
                bin_id = (contig, bin_start, bin_end)

            # Add a (single) count to the dictionary:
            if not bin_id in counts:
                counts[bin_id] = {}
            if not sample in counts[bin_id]:
                counts[bin_id][sample] = 1
            else:
                counts[bin_id][sample] += 1
    return counts


def count_fragments_binned_wrap(args):
    (alignments_path, bin_size, max_fragment_size,
     contig, start, end,
     min_mq, alt_spans, dedup, kwargs) = args
    tp = f'./TEMP_{contig}_{start}.pickle.gz'
    os.system(f'bamBinCounts.py {alignments_path} -o {tp} -start {start} -end {end} -contig {contig}')
    with gzip.open(tp, 'rb') as pf:
        result = pickle.load(pf)
    os.remove(tp)
    return result


def count_fixed_width_binned_regions(path: str,
                                     contig: str,
                                     bin_size: int,
                                     n_bins: int,
                                     contig_starts: dict,
                                     smap: dict,
                                     min_mapq: int = 1,
                                     identifier=None,
                                     read_pass_function=None  # Called for every read; when it returns True the read is counted. Requires the DS tag.
                                     ):
    """
    Count using fixed width regions with bin_size.
    The header (bin layout) is precomputed by the caller; see count_multi_sample.
    """
    counts = np.zeros((len(smap), n_bins))
    with pysam.AlignmentFile(path) as a:
        if read_pass_function is not None:
            for read in a.fetch(contig):
                if not read_pass_function(read):
                    continue
                ds = read.get_tag('DS')
                bi = smap[read.get_tag('SM')]
                bx = int(ds / bin_size) + contig_starts[contig]
                counts[bi, bx] += 1
        else:
            # Standard filter loop
            for read in a.fetch(contig):
                if read.is_read2 or read.is_qcfail or read.is_duplicate:
                    continue
                if not read.has_tag('DS'):
                    continue
                if read.mapping_quality < min_mapq:
                    continue
                ds = read.get_tag('DS')
                bi = smap[read.get_tag('SM')]
                bx = int(ds / bin_size) + contig_starts[contig]
                counts[bi, bx] += 1
    if identifier is not None:
        return counts, identifier
    return counts


def count_multi_sample(patientsToBam, bin_size, n_threads=None, exclude_contigs=('MT',), verbose=False,
                       include_contigs=None, read_pass_function=None):
    cmds = []
    headers = dict()
    sample_lists = dict()
    countsDict = {}

    if verbose:
        print("Obtaining list of samples and contigs")

    for patient, bampath in patientsToBam.items():
        cs = {c: s for c, s in get_contig_sizes(bampath).items()
              if (is_main_chromosome(c) and not c in exclude_contigs)
              and (include_contigs is None or c in include_contigs)}

        header = []
        contig_starts = {}
        for c, s in cs.items():
            contig_starts[c] = len(header)
            for bin_start in range(0, s, bin_size):
                # The final bin of a contig is truncated to the contig end:
                bin_end = min(bin_start + bin_size, s)
                header.append((c, bin_start, bin_end))

        # create single cell matrix:
        sample_list = list(get_samples_from_bam(bampath))
        smap = {s: i for i, s in enumerate(sample_list)}
        counts = np.zeros((len(smap), len(header)))
        countsDict[patient] = counts  # Initialise empty matrix

        cmds += [
            (count_fixed_width_binned_regions, {
                'path': bampath,
                'n_bins': len(header),
                'contig_starts': contig_starts,
                'bin_size': bin_size,
                'contig': contig,
                'smap': smap,
                'read_pass_function': read_pass_function,
                'identifier': patient
            }) for contig in cs.keys()
        ]
        headers[patient] = header
        sample_lists[patient] = sample_list

    if verbose:
        print("Counting")
    with Pool(n_threads) as workers:
        for i, (r, identifier) in enumerate(workers.imap(pool_wrapper, cmds, chunksize=1)):
            countsDict[identifier] += r
            if verbose:
                print(f"Progress: {(i / len(cmds)) * 100:.2f}%    ", end='\r')

    if verbose:
        print("Finished counting, now creating dataframes")
    for identifier in countsDict:
        countsDict[identifier] = pd.DataFrame(
            countsDict[identifier],
            columns=pd.MultiIndex.from_tuples(headers[identifier]),
            index=sample_lists[identifier])
    return countsDict
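
# Usage sketch (mapping and path are hypothetical): one count matrix per
# patient, rows are samples, columns are (contig, start, end) bins:
#   frames = count_multi_sample({'patientA': './A.bam'}, bin_size=1_000_000,
#                               n_threads=8, verbose=True)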


if __name__ == '__main__':
    argparser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Extract counts from bam file')
    argparser.add_argument('alignmentfile', type=str)
    argparser.add_argument('-head', type=int, default=None)
    argparser.add_argument('-bin_size', type=int, default=250_000)
    argparser.add_argument('-t', type=int, default=16, help='Threads')
    argparser.add_argument('-j', type=int, default=5, help='Bins per worker')
    argparser.add_argument(
        '-min_mq',
        type=int,
        default=30,
        help='Minimum mapping quality')
    argparser.add_argument('-max_fragment_size', type=int, default=1000)
    argparser.add_argument('-o', type=str, required=True)
    args = argparser.parse_args()

    counts = {}
    with multiprocessing.Pool(args.t) as workers:
        # First pass: count the total amount of jobs for the progress indicator
        job_total = sum(1 for _ in generate_commands(
            alignments_path=args.alignmentfile,
            bin_size=args.bin_size,
            bins_per_job=args.j,
            head=args.head))

        job_gen = generate_commands(
            alignments_path=args.alignmentfile,
            bin_size=args.bin_size,
            bins_per_job=args.j,
            head=args.head)

        for i, result in enumerate(workers.imap_unordered(count_fragments_binned, job_gen)):
            counts.update(result)
            print(f'\r{i}/{job_total}', end='')

    if args.o.endswith('.pickle.gz'):
        pd.DataFrame(counts).to_pickle(args.o)
    elif args.o.endswith('.csv'):
        pd.DataFrame(counts).to_csv(args.o)
    else:
        with gzip.open(args.o, 'wb') as out:
            pickle.dump(counts, out)