forked from chiulab/surpi
-
Notifications
You must be signed in to change notification settings - Fork 0
/
SURPI.sh
executable file
·1315 lines (1162 loc) · 66.1 KB
/
SURPI.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/bin/bash
#
# SURPI.sh
#
# This is the main driver script for the SURPI pipeline.
# Chiu Laboratory
# University of California, San Francisco
#
#
# Copyright (C) 2014 Samia N Naccache, Scot Federman, and Charles Y Chiu - All Rights Reserved
# SURPI has been released under a modified BSD license.
# Please see license file for details.
#
SURPI_version="1.0.22"
optspec=":f:hvz:"
# Terminal attributes for status output (empty if tput/TERM is unavailable).
bold=$(tput bold)
normal=$(tput sgr0)
green='\e[0;32m'
red='\e[0;31m'
endColor='\e[0m'
host=$(hostname)
scriptname=${0##*/}
while getopts "$optspec" option; do
	case "${option}" in
		f) config_file="${OPTARG}";;	# get parameters from config file if specified
		h) HELP=1;;
		v) VERIFICATION=1;;
		z) create_config_file="${OPTARG}"
			configprefix=${create_config_file%.fastq}
			;;
		:) echo "Option -$OPTARG requires an argument." >&2
			exit 1
			;;
		\?) # unknown options were previously ignored silently; fail loudly instead
			echo "Invalid option: -$OPTARG" >&2
			exit 1
			;;
	esac
done
# With -h, or when invoked with no arguments at all, print usage and exit 0.
if [[ $HELP -eq 1 || $# -lt 1 ]]
then
# Unquoted heredoc delimiter: ${bold}/${normal}, ${SURPI_version} and
# $scriptname expand into the usage text below.
cat <<USAGE
${bold}SURPI version ${SURPI_version}${normal}
This program will run the SURPI pipeline with the parameters supplied by the config file.
${bold}Command Line Switches:${normal}
-h Show this help & ignore all other switches
-f Specify config file
This switch is used to initiate a SURPI run using a specified config file. Verification (see -v switch) will occur at the beginning of the run.
The pipeline will cease if SURPI fails to find a software dependency or necessary reference data.
-v Verification mode
When using verification mode, SURPI will verify necessary dependencies, but will
stop after verification. This same verification is also done
before each SURPI run.
• software dependencies
SURPI will check for the presence of all software dependencies.
• reference data specified in config file
SURPI does a cursory check for the presence of reference data. This check is
not a comprehensive test of the reference data.
• taxonomy lookup functionality
SURPI verifies the functionality of the taxonomy lookup.
• FASTQ file (if requested in config file)
SURPI uses fastQValidator to check the integrity of the FASTQ file.
-z Create default config file and go file. [optional] (specify fastq filename)
This option will create a standard .config file, and go file.
${bold}Usage:${normal}
Create default config and go file.
$scriptname -z test.fastq
Run SURPI pipeline in verification mode:
$scriptname -f config -v
Run SURPI pipeline with the config file:
$scriptname -f config
USAGE
exit
fi
# -z mode: emit a runnable "go" wrapper script plus a default .config, then exit.
if [[ $create_config_file ]]
then
# The go file pins PATH and captures stdout/stderr of the eventual run.
echo "PATH=/usr/local/bin/surpi:/usr/local/bin:/usr/bin/:/bin" > go_$configprefix
echo "nohup $scriptname -f $configprefix.config > SURPI.$configprefix.log 2> SURPI.$configprefix.err" >> go_$configprefix
chmod +x go_$configprefix
#------------------------------------------------------------------------------------------------
# Unquoted EOF delimiter: $SURPI_version and $create_config_file expand into the
# generated config; all other text is written literally (the '#' lines below are
# config-file content, not shell comments).
(
cat <<EOF
# This is the config file used by SURPI. It contains mandatory parameters,
# optional parameters, and server related constants.
# Do not change the config_file_version - it is auto-generated.
# and used to ensure that the config file used matches the version of the SURPI pipeline run.
config_file_version="$SURPI_version"
##########################
# Input file
##########################
#To create this file, concatenate the entirety of a sequencing run into one FASTQ file.
#SURPI currently does not have paired-end functionality, we routinely concatenate Read 1 and Read 2 into the unified input file.
#For SURPI to provide proper readcount statistics, all read headers in a single SURPI input dataset should share a
#common 3 letter string (eg: M00, HWI, HIS, SCS, SRR for example). SURPI currently selects the string from the first and last reads only.
inputfile="$create_config_file"
#input filetype. [FASTA/FASTQ]
inputtype="FASTQ"
#FASTQ quality score type: [Sanger/Illumina]
#Sanger = Sanger score (ASCII-33)
#Illumina = Illumina score (ASCII-64)
#Counterintuitively, the Sanger quality format is likely the method your data is encoded in if you are generating data on an Illumina machine after early 2011.
#Selecting Illumina quality on Sanger data will likely lead to improper preprocessing, resulting in preprocessed files of 0 length.
quality="Sanger"
#Adapter set used. [Truseq/Nextera/NexSolB/NexSolTruseq]
#Truseq = trims truseq adaptors
#Nextera = trims Nextera adaptors
adapter_set="Truseq"
#Verify FASTQ quality
# 0 = skip validation
# 1 [default] = run validation, don't check for unique names, quit on failure
# 2 = run validation, check for unique names, quit on failure (helpful with newer MiSeq output that has same name for read1 and read2 due to spacing)
# 3 = run validation, check for unique names, do not quit on failure
VERIFY_FASTQ=1
##########################
# Run Mode
##########################
#Run mode to use. [Comprehensive/Fast]
#Comprehensive mode allows SNAP to NT -> denovo contig assembly -> RAPSearch to Viral proteins or NR
#Fast mode allows SNAP to curated FAST databases
run_mode="Comprehensive"
#Below options are to skip specific steps.
#Uncomment preprocess parameter to skip preprocessing
#(useful for large data sets that have already undergone preprocessing step)
# If skipping preprocessing, be sure these files exist in the working directory.
# \$basef.cutadapt.fastq
# \$basef.preprocessed.fastq
#preprocess="skip"
##########################
# Preprocessing
##########################
#length_cutoff: after quality and adaptor trimming, any sequence with length smaller than length_cutoff will be discarded
length_cutoff="50"
#Cropping values. Highly recommended default = 10,75
#Cropping quality trimmed reads prior to SNAP alignment
#snapt_nt = Where to start crop
#crop_length = how long to crop
start_nt=10
crop_length=75
#quality cutoff ( -q switch in cutadapt )
quality_cutoff=18
##########################
# SNAP
##########################
#SNAP executable
snap="/usr/local/bin/snap-dev"
#SNAP edit distance for Computational Subtraction of host genome [Highly recommended default: d_human=12]
#see Section 3.1.2 MaxDist description: http://snap.cs.berkeley.edu/downloads/snap-1.0beta-manual.pdf
d_human=12
#SNAP edit distance for alignment to NCBI nt DB [validated only at: d=12]
d_NT_alignment=12
#snap_nt iterator to use. [inline/end]
#inline : compares each SNAP iteration to the previous as they are completed
# Uses more disk space, and should be faster for larger input files.
# also allows for concurrent SNAP runs.
#end : compares all SNAP iterations once they have all completed.
#These two methods should give identical results, but may have different performance.
#Note: use inline for now (6/24/14), there is a bug with "end"
snap_integrator="inline"
#only used if snap_integrator=end
#if using this parameter, the SNAP databases should reside on separate disks in order to increase throughput.
#(Mechanism for doing this is not yet in place)
num_simultaneous_SNAP_runs=1
##########################
# RAPSEARCH
##########################
#RAPSearch database method to use. [Viral/NR]
#Viral database contains viral proteins derived from genbank
#NR contains all NR proteins
rapsearch_database="Viral"
#RAPSearch e_cutoffs
#E-value of 1e+1, 1e+0 1e-1 is represented by RAPSearch2 http://omics.informatics.indiana.edu/mg/RAPSearch2/ in log form (1,0,-1).
#Larger E-values are required to find highly divergent viral sequences.
ecutoff_Vir="1"
ecutoff_NR="1"
#This parameter sets whether RAPSearch will be run in its fast mode or its normal mode.
# see RAPSearch -a option for details
# T will give (10~30 fold) speed improvement, at the cost of sensitivity at high e-values
# [T: perform fast search, F: perform normal search]
RAPSearch_NR_fast_mode="T"
##########################
# de novo Assembly
##########################
#kmer value for ABySS in DeBruijn portion of denovo contig assembly. Highly recommended default=34
abysskmer=34
#Set ignore_barcodes_for_de_novo=N [default] to deNovo assemble for each barcode independently.
#Set ignore_barcodes_for_de_novo=Y to assemble all barcodes together into a single assembly.
ignore_barcodes_for_de_novo=N
#e value for BLASTn used in coverage map generation
eBLASTn="1e-15"
##########################
# Reference Data
##########################
# SNAP-indexed database of host genome (for subtraction phase)
# SURPI will subtract all SNAP databases found in this directory from the input sequence
# Useful if you want to subtract multiple genomes (without combining SNAP databases)
# or, if you need to split a db if it is larger than available RAM.
SNAP_subtraction_folder="/reference/hg19"
# directory for SNAP-indexed databases of NCBI NT (for mapping phase in comprehensive mode)
# directory must ONLY contain snap indexed databases
SNAP_COMPREHENSIVE_db_dir="/reference/COMP_SNAP"
# directory for SNAP-indexed databases for mapping phase in FAST mode
# directory must ONLY contain snap indexed databases
SNAP_FAST_db_dir="/reference/FAST_SNAP"
#Taxonomy Reference data directory
#This folder should contain the 3 SQLite files created by the script "create_taxonomy_db.sh"
#gi_taxid_nucl.db - nucleotide db of gi/taxonid
#gi_taxid_prot.db - protein db of gi/taxonid
#names_nodes_scientific.db - db of taxonid/taxonomy
taxonomy_db_directory="/reference/taxonomy"
#RAPSearch viral database name: indexed protein dataset (all of Viruses)
#make sure that directory also includes the .info file
RAPSearch_VIRUS_db="/reference/RAPSearch/rapsearch_viral_db"
#RAPSearch nr database name: indexed protein dataset (all of NR)
#make sure that directory also includes the .info file
RAPSearch_NR_db="/reference/RAPSearch/rapsearch_nr_db"
ribo_snap_bac_euk_directory="/reference/RiboClean_SNAP"
##########################
# Server related values
##########################
#Number of cores to use. Will use all cores on machine if unspecified.
#Uncomment the parameter to set explicitly.
#cores=64
#specify a location for storage of temporary files.
#Space needed may be up to 10x the size of the input file.
#This folder will not be created by SURPI, so be sure it already exists with proper permissions.
temporary_files_directory="/tmp/"
#This parameter controls whether dropcache is used throughout the pipeline. If free RAM is less than cache_reset,
# then dropcache. If cache_reset=0, then dropcache will never be used.
cache_reset="0"
##########################
# AWS related values
##########################
# AWS_master_slave will start up a slave instance on AWS for each division of the nt database
# It will be more costly, but should run significantly faster than the solo method, which
# runs each NT division through SNAP serially on a single machine.
# If using the "AWS_master_slave" option, be sure that all parameters in the AWS section below are
# set properly.
# These values are only used if the "AWS_master_slave" option is set below.
# Note: this method is currently incomplete and experimental.
#Which method to use for SNAP to nt [AWS_master_slave/solo]
# AWS_master_slave will start up a slave instance on AWS for each division of the nt database
# It will be more costly, but should run significantly faster than the solo method, which
# runs each NT division through SNAP serially on a single machine.
# If using the "AWS_master_slave" option, be sure that all parameters in the AWS section below are
# set properly.
#6/24/14 AWS_master_slave option is currently experimental and incomplete. Please use "solo" for the time being.
snap_nt_procedure="solo"
#ami-b93264d0 = Ubuntu 12.04 HVM 64-bit
#ami-5ef61936 = custom AMI (ami-b93264d0 + SNAP setup)
ami_id="ami-5ef61936"
#Number of slave instances will not exceed this value. Useful for testing, in order to restrict instance count.
#Otherwise, number of instances should be equal to number of SNAP-NT database divisions. This value is
#automatically calculated by SURPI.
max_slave_instances=29
instance_type="c3.8xlarge"
#this parameter is currently tied to the \$keypair used during slave_setup.sh. should be cleaned up prior to release
pemkey="/home/ubuntu/.ssh/surpi.pem"
keypair="surpi"
security_group="SURPI"
availability_zone="us-east-1d"
placement_group="surpi"
#specify directory for incoming data from slaves
#this directory will not be created by SURPI - it should pre-exist.
#There must be sufficient space in this directory to contain all returning compressed SAM files
incoming_dir="/ssd4/incoming"
EOF
) > $configprefix.config
#------------------------------------------------------------------------------------------------
echo "$configprefix.config generated. Please edit it to contain the proper parameters for your analysis."
echo "go_$configprefix generated. Initiate the pipeline by running this program. (./go_$configprefix)"
echo
exit
fi
# Load run parameters from the config file; refuse to run with a missing or
# unreadable file, or a version mismatch between config and pipeline.
if [[ ! -r $config_file ]]
then
	echo "The config file specified: $config_file is not present."
	exit 65
fi
source "$config_file"
# The generated config carries config_file_version; it must match this script.
if [ "$config_file_version" != "$SURPI_version" ]
then
	echo "The config file $config_file was created with SURPI $config_file_version."
	echo "The current version of SURPI running is $SURPI_version."
	echo "Please generate a new config file with SURPI $SURPI_version in order to run SURPI."
	exit 65
fi
#check that $inputfile is a FASTQ file, and has a FASTQ suffix.
# convert from FASTA if necessary, add FASTQ suffix if necessary.
prepare_input_file() {
	# Sets the global FASTQ_file from $inputtype/$inputfile, creating a .fastq
	# symlink or a FASTA->FASTQ conversion when required.
	if [ "$inputtype" = "FASTQ" ]
	then
		if [ "${inputfile##*.}" != "fastq" ]
		then
			# -f tolerates re-runs where the link already exists
			ln -sf "$inputfile" "$inputfile.fastq"
			FASTQ_file="$inputfile.fastq"
		else
			FASTQ_file="$inputfile"
		fi
	elif [ "$inputtype" = "FASTA" ]
	then
		echo "Converting $inputfile to FASTQ format..."
		FASTQ_file="$inputfile.fastq"
		fasta_to_fastq "$inputfile" > "$FASTQ_file"
	elif [ -n "$inputtype" ]
	then
		# previously an unrecognized inputtype fell through silently, leaving
		# FASTQ_file unset and failing much later in the pipeline
		echo "inputtype \"$inputtype\" is not valid - must be FASTQ or FASTA."
		exit 65
	fi
}
prepare_input_file
#set cores. if none specified, use all cores present on machine
if [ -z "$cores" ]
then
	# grep -c replaces the redundant grep|wc -l pipeline; ^processor anchors
	# to the field name so stray matches elsewhere in cpuinfo don't count.
	cores=$(grep -c ^processor /proc/cpuinfo)
fi
# Default to Comprehensive mode, then reject anything but the two known modes.
if [ -z "$run_mode" ]
then
	run_mode="Comprehensive"
fi
case "$run_mode" in
	Comprehensive|Fast) ;;
	*)
		echo "${bold}$run_mode${normal} is not a valid run mode - must be Comprehensive or Fast."
		# run_mode comes from the config file; the old message referenced a
		# nonexistent -u switch.
		echo "Please specify a valid run_mode in the config file."
		exit 65
		;;
esac
#set cache_reset. if none specified:
# >500GB -> 200GB
# >200GB -> 150GB
# otherwise -> 50GB
# note: this may cause problems on a machine with <50GB RAM
# MemTotal in /proc/meminfo is reported in kB, so 500000000 kB ~= 500 GB.
if [ ! $cache_reset ]
then
	mem_kb=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
	if (( mem_kb > 500000000 )); then
		cache_reset=200		# GB
	elif (( mem_kb > 200000000 )); then
		cache_reset=150
	else
		cache_reset=50
	fi
fi
#these 2 parameters are for cropping prior to snap in the preprocessing stage
# Apply the documented defaults (start cropping at nt 10, keep 75 nt) whenever
# the config leaves either value unset or empty.
: "${start_nt:=10}"
: "${crop_length:=75}"
# Validate adapter_set and quality against the supported values, then encode
# quality as the single-letter flag ("S"/"I") consumed downstream.
# (case replaces the deprecated `[ ... -a ... ]` chains; the old messages
# referenced nonexistent -a/-q switches — these values come from the config.)
case "$adapter_set" in
	Truseq|Nextera|NexSolB|NexSolTruseq) ;;
	*)
		echo "${bold}$adapter_set${normal} is not a valid adapter_set - must be Truseq, Nextera, NexSolTruseq, or NexSolB."
		echo "Please specify a valid adapter_set in the config file."
		exit 65
		;;
esac
case "$quality" in
	Sanger) quality="S" ;;
	Illumina) quality="I" ;;
	*)
		echo "${bold}$quality${normal} is not a valid quality - must be Sanger or Illumina."
		echo "Please specify a valid quality in the config file."
		exit 65
		;;
esac
#RAPSearch e_cutoffs
if [ -z "$ecutoff_Vir" ]
then
	ecutoff_Vir="1"
fi
# NOTE(review): this fallback of "0" disagrees with the generated config's
# default ecutoff_NR="1" — confirm which default is intended.
if [ -z "$ecutoff_NR" ]
then
	ecutoff_NR="0"
fi
# SNAP edit distance for host subtraction; 12 is the recommended default.
if [ -z "$d_human" ]
then
	d_human=12
fi
# length_cutoff has no safe default; it must come from the config file
# (the old message referenced a nonexistent -x switch).
if [ -z "$length_cutoff" ]
then
	echo "${bold}length_cutoff${normal} was not specified."
	echo "Please specify a valid length_cutoff in the config file."
	exit 65
fi
case "$rapsearch_database" in
	Viral|NR) ;;
	*)
		echo "${bold}$rapsearch_database${normal} is not a valid RAPSearch database - must be Viral or NR."
		echo "Please place one of the above options in your config file."
		exit 65
		;;
esac
# Derive the base filename (directory and .fastq suffix stripped) used to name
# every downstream artifact of this run.
nopathf=${FASTQ_file##*/}
basef=${nopathf%.fastq}
# Confirm each required external tool is reachable on $PATH before any work
# starts; a single missing tool marks the whole dependency check as FAIL.
declare -a dependency_list=("gt" "seqtk" "fastq" "fqextract" "cutadapt" "prinseq-lite.pl" "dropcache" "$snap" "rapsearch" "fastQValidator" "abyss-pe" "ABYSS-P" "Minimo")
echo "-----------------------------------------------------------------------------------------"
echo "DEPENDENCY VERIFICATION"
echo "-----------------------------------------------------------------------------------------"
for dep in "${dependency_list[@]}"
do
	if ! hash $dep 2>/dev/null; then
		echo
		echo -e "$dep: ${red}BAD${endColor}"
		echo "$dep does not appear to be installed properly."
		echo "Please verify your SURPI installation and \$PATH, then restart the pipeline"
		echo
		dependency_check="FAIL"
	else
		echo -e "$dep: ${green}OK${endColor}"
	fi
done
echo "-----------------------------------------------------------------------------------------"
echo "SOFTWARE VERSION INFORMATION"
echo "-----------------------------------------------------------------------------------------"
# Scrape version strings from each tool's own output; the head/tail/awk field
# positions below are coupled to each tool's banner format and will silently
# yield empty strings if a banner changes.
gt_version=$(gt -version | head -1 | awk '{print $3}')
seqtk_version=$(seqtk 2>&1 | head -3 | tail -1 | awk '{print $2}')
cutadapt_version=$(cutadapt --version)
prinseqlite_version=$(prinseq-lite.pl --version 2>&1 | awk '{print $2}')
snap_version=$(snap 2>&1 | grep version | awk '{print $5}')
snap_dev_version=$(snap-dev 2>&1 | grep version | awk '{print $5}')
rapsearch_version=$(rapsearch 2>&1 | head -2 | tail -1 | awk '{print $2}')
abyss_pe_version=$(abyss-pe version | head -2 | tail -1 | awk '{print $3}')
ABYSS_P_version=$(ABYSS-P --version | head -1 | awk '{print $3}')
Minimo_version=$(Minimo -h | tail -2 | awk '{print $2}')
echo -e "SURPI version: $SURPI_version"
echo -e "config file version: $config_file_version"
echo -e "gt: $gt_version"
echo -e "seqtk: $seqtk_version"
echo -e "cutadapt: $cutadapt_version"
# NOTE(review): the stray ${endColor} suffixes on the next three lines look
# like leftovers from removed color codes — harmless (reset escape) but confirm.
echo -e "prinseq-lite: $prinseqlite_version${endColor}"
echo -e "snap: $snap_version${endColor}"
echo -e "snap-dev: $snap_dev_version${endColor}"
echo -e "RAPSearch: $rapsearch_version"
echo -e "abyss-pe: $abyss_pe_version"
echo -e "ABYSS-P: $ABYSS_P_version"
echo -e "Minimo: $Minimo_version"
# Verify each SNAP database directory: every subdirectory must contain a
# "Genome" index file. A missing database is always fatal for the subtraction
# set, but only fatal for Comprehensive/Fast sets when that run_mode is active.
verify_snap_db_dir() {
	# $1 = directory containing SNAP database subdirectories
	# $2 = "always", or the run_mode ("Comprehensive"/"Fast") for which a
	#      missing database marks the reference check as FAIL
	local db
	for db in "$1"/*
	do
		if [ -f "$db/Genome" ]
		then
			echo -e "\t$db: ${green}OK${endColor}"
		else
			echo -e "\t$db: ${red}BAD${endColor}"
			if [ "$2" = "always" ] || [ "$run_mode" = "$2" ]
			then
				reference_check="FAIL"
			fi
		fi
	done
}
echo "-----------------------------------------------------------------------------------------"
echo "REFERENCE DATA VERIFICATION"
echo "-----------------------------------------------------------------------------------------"
echo -e "SNAP subtraction db"
verify_snap_db_dir "$SNAP_subtraction_folder" always
echo -e "SNAP Comprehensive Mode database"
verify_snap_db_dir "$SNAP_COMPREHENSIVE_db_dir" Comprehensive
echo -e "SNAP FAST Mode database"
verify_snap_db_dir "$SNAP_FAST_db_dir" Fast
#verify taxonomy is functioning properly
# A known GI (149408158) must round-trip through the lookup; any other output
# (including empty output when the tool or its db is missing) marks taxonomy BAD.
result=$( taxonomy_lookup_embedded.pl -d nucl -q "$taxonomy_db_directory" 149408158 )
# $result is quoted: an empty result previously crashed `[` with
# "unary operator expected" instead of cleanly taking the failure branch.
if [ "$result" = "149408158" ]
then
	echo -e "taxonomy: ${green}OK${endColor}"
else
	echo -e "taxonomy: ${red}BAD${endColor}"
	echo "taxonomy appears to be malfunctioning. Please check logs and config file to verify proper taxonomy functionality."
	reference_check="FAIL"
fi
# Check one RAPSearch reference file; marks reference_check=FAIL when missing.
# Quoting matters here: the original unquoted `[ -f $var ]` degenerated to the
# one-argument test `[ -f ]` (always true) whenever the variable was unset.
check_reference_file() {
	# $1 = path to a required reference file
	if [ -f "$1" ]
	then
		echo -e "\t$1: ${green}OK${endColor}"
	else
		echo -e "\t$1: ${red}BAD${endColor}"
		echo
		reference_check="FAIL"
	fi
}
echo -e "RAPSearch viral database"
check_reference_file "$RAPSearch_VIRUS_db"
check_reference_file "$RAPSearch_VIRUS_db.info"
echo -e "RAPSearch NR database"
check_reference_file "$RAPSearch_NR_db"
check_reference_file "$RAPSearch_NR_db.info"
# Abort before the pipeline proper if any dependency or reference check failed.
if [[ "$dependency_check" = "FAIL" || "$reference_check" = "FAIL" ]]
then
	echo -e "${red}There is an issue with one of the dependencies or reference databases above.${endColor}"
	exit 65
else
	echo -e "${green}All necessary dependencies and reference data pass.${endColor}"
fi
# One AWS slave per SNAP-NT database division, capped by max_slave_instances.
actual_slave_instances=$(ls -1 "$SNAP_COMPREHENSIVE_db_dir" | wc -l)
if [ "$max_slave_instances" -lt "$actual_slave_instances" ]
then
	actual_slave_instances=$max_slave_instances
fi
# Length of the first read; used to size the abyss contig cutoff.
get_first_read_length() {
	# $1 = FASTQ file; prints the character count of the first sequence line
	local seq
	seq=$(head -n2 -- "$1" | tail -1)
	printf '%s\n' "${#seq}"
}
length=$(get_first_read_length "$FASTQ_file")
# int(1.75 * length) without spawning perl: 7*length/4 in integer arithmetic.
contigcutoff=$(( 7 * ${length:-0} / 4 ))
# Echo every effective run parameter so the log records exactly what this run
# used (config values, derived values, and server facts).
echo "-----------------------------------------------------------------------------------------"
echo "INPUT PARAMETERS"
echo "-----------------------------------------------------------------------------------------"
echo "Command Line Usage: $scriptname $@"
echo "SURPI version: $SURPI_version"
echo "config_file: $config_file"
echo "config file version: $config_file_version"
echo "Server: $host"
echo "Working directory: $( pwd )"
echo "run_mode: $run_mode"
echo "inputfile: $inputfile"
echo "inputtype: $inputtype"
echo "FASTQ_file: $FASTQ_file"
echo "cores used: $cores"
echo "Raw Read quality: $quality"
echo "Quality cutoff: $quality_cutoff"
echo "Read length_cutoff for preprocessing under which reads are thrown away: $length_cutoff"
echo "temporary files location: $temporary_files_directory"
echo "SNAP_db_directory housing the reference databases for Subtraction: $SNAP_subtraction_folder"
echo "SNAP_db_directory housing the reference databases for Comprehensive Mode: $SNAP_COMPREHENSIVE_db_dir"
echo "SNAP_db_directory housing the reference databases for Fast Mode: $SNAP_FAST_db_dir"
echo "snap_integrator: $snap_integrator"
echo "SNAP edit distance for SNAP to Human: d_human: $d_human"
echo "SNAP edit distance for SNAP to NT: d_NT_alignment: $d_NT_alignment"
echo "rapsearch_database: $rapsearch_database"
echo "RAPSearch indexed viral db used: $RAPSearch_VIRUS_db"
echo "RAPSearch indexed NR db used: $RAPSearch_NR_db"
echo "taxonomy database directory: $taxonomy_db_directory"
echo "adapter_set: $adapter_set"
echo "Raw Read length: $length"
echo "contigcutoff for abyss assembly unitigs: $contigcutoff"
echo "abysskmer length: $abysskmer"
echo "Ignore barcodes for assembly? $ignore_barcodes_for_de_novo"
echo "cache_reset (if 0, then dropcache will never be used): $cache_reset"
echo "start_nt: $start_nt"
echo "crop_length: $crop_length"
echo "e value for BLASTn used in coverage map generation: $eBLASTn"
# NOTE(review): $snap_nt_procedure is unquoted here; if the config leaves it
# unset this `[` test errors to stderr and the cluster section is skipped —
# confirm the config always sets it.
if [ $snap_nt_procedure = "AWS_master_slave" ]
then
	echo "---------------------------------------------"
	echo "Cluster settings"
	echo "snap_nt_procedure: $snap_nt_procedure"
	echo "ami_id: $ami_id"
	echo "max_slave_instances: $max_slave_instances"
	echo "actual_slave_instances: $actual_slave_instances"
	echo "instance_type: $instance_type"
	echo "keypair: $keypair"
	echo "security_group: $security_group"
	echo "placement_group: $placement_group"
	echo "availability_zone: $availability_zone"
	echo "incoming_dir: $incoming_dir"
	echo "---------------------------------------------"
fi
echo "-----------------------------------------------------------------------------------------"
# Run fastQValidator on the input FASTQ per the VERIFY_FASTQ config setting:
#   1 = validate, skip unique-name check, quit on failure
#   2 = validate including unique-name check, quit on failure
#   3 = validate including unique-name check, never quit
validate_fastq() {
	# $1 = extra fastQValidator options (word-split on purpose; may be empty)
	# $2 = "quit" to abort the pipeline on validation failure
	# shellcheck disable=SC2086 -- $1 intentionally carries zero or more options
	if fastQValidator --file "$FASTQ_file" --printBaseComp --avgQual $1 > "quality.$basef.log"
	then
		echo -e "${green}$FASTQ_file appears to be a valid FASTQ file. Check the quality.$basef.log file for details.${endColor}"
	else
		echo -e "${red}$FASTQ_file appears to be an invalid FASTQ file. Check the quality.$basef.log file for details.${endColor}"
		echo -e "${red}You can bypass the quality check by not using the -v switch.${endColor}"
		if [ "$2" = "quit" ]
		then
			exit 65
		fi
	fi
}
if [ "$VERIFY_FASTQ" = 1 ]
then
	validate_fastq "--disableSeqIDCheck" quit
elif [ "$VERIFY_FASTQ" = 2 ]
then
	validate_fastq "" quit
elif [ "$VERIFY_FASTQ" = 3 ]
then
	# mode 3: produce the quality log only; never block the pipeline
	fastQValidator --file "$FASTQ_file" --printBaseComp --avgQual > "quality.$basef.log"
fi
if [[ $VERIFICATION -eq 1 ]] #stop pipeline if using verification mode
then
	exit
fi
###########################################################
echo -e "$(date)\t$scriptname\t########## STARTING SURPI PIPELINE ##########"
START_PIPELINE=$(date +%s)
echo -e "$(date)\t$scriptname\tFound file $FASTQ_file"
echo -e "$(date)\t$scriptname\tAfter removing path: $nopathf"
############ Start up AWS slave machines ##################
file_with_slave_ips="slave_list.txt"
case "$snap_nt_procedure" in
	AWS_master_slave)
		# Boot the slave fleet in the background; it must be ready by the
		# SNAP-to-NT stage of the pipeline.
		start_slaves.sh $ami_id $actual_slave_instances $instance_type $keypair $security_group $availability_zone $file_with_slave_ips $placement_group & # > $basef.AWS.log 2>&1
		;;
esac
############ PREPROCESSING ##################
if [ "$preprocess" != "skip" ]
then
	echo -e "$(date)\t$scriptname\t############### PREPROCESSING ###############"
	echo -e "$(date)\t$scriptname\tStarting: preprocessing using $cores cores "
	START_PREPROC=$(date +%s)
	# Log line now mirrors the exact command executed below (it previously
	# showed stale "Y N" arguments and omitted $quality_cutoff).
	echo -e "$(date)\t$scriptname\tParameters: preprocess_ncores.sh $basef.fastq $quality N $length_cutoff $cores $cache_reset N $adapter_set $start_nt $crop_length $temporary_files_directory $quality_cutoff >& $basef.preprocess.log"
	preprocess_ncores.sh $basef.fastq $quality N $length_cutoff $cores $cache_reset N $adapter_set $start_nt $crop_length $temporary_files_directory $quality_cutoff >& $basef.preprocess.log
	echo -e "$(date)\t$scriptname\tDone: preprocessing "
	END_PREPROC=$(date +%s)
	diff_PREPROC=$(( END_PREPROC - START_PREPROC ))
	echo -e "$(date)\t$scriptname\tPreprocessing took $diff_PREPROC seconds" | tee timing.$basef.log
fi
# verify preprocessing step
if [ ! -s "$basef.cutadapt.fastq" ] || [ ! -s "$basef.preprocessed.fastq" ]
then
	echo -e "$(date)\t$scriptname\t${red}Preprocessing appears to have failed. One of the following files does not exist, or is of 0 size:${endColor}"
	echo "$basef.cutadapt.fastq"
	echo "$basef.preprocessed.fastq"
	exit 65	# was a bare `exit`, which reported success (status 0) on failure
fi
############# BEGIN SNAP PIPELINE #################
# Free memory in GB = column 4 of the "Mem:" row, i.e. line 2 of free's output.
freemem=$(free -g | awk 'NR==2 {print $4}')
echo -e "$(date)\t$scriptname\tThere is $freemem GB available free memory...[cutoff=$cache_reset GB]"
# NOTE(review): $dropcache (the variable) is never assigned in this script or
# the generated config — confirm whether this gate should test $cache_reset.
if [[ $dropcache == "Y" ]] && [ "$freemem" -lt "$cache_reset" ]
then
	echo -e "$(date)\t$scriptname\tClearing cache..."
	dropcache
fi
############# HUMAN MAPPING #################
if [ "$human_mapping" != "skip" ]
then
	echo -e "$(date)\t$scriptname\t############### SNAP TO HUMAN ###############"
	basef_h=${nopathf%.fastq}.preprocessed.s20.h250n25d${d_human}xfu # remove fastq extension
	echo -e "$(date)\t$scriptname\tBase file: $basef_h"
	echo -e "$(date)\t$scriptname\tStarting: $basef_h human mapping"
	file_to_subtract="$basef.preprocessed.fastq"
	subtracted_output_file="$basef_h.human.snap.unmatched.sam"
	SUBTRACTION_COUNTER=0
	START_SUBTRACTION=$(date +%s)
	# Subtract every SNAP database in the folder in turn, feeding the output
	# of each round into the next (serial chain of subtractions).
	for SNAP_subtraction_db in "$SNAP_subtraction_folder"/*; do
		SUBTRACTION_COUNTER=$(( SUBTRACTION_COUNTER + 1 ))	# was deprecated $[ ] syntax
		# check if SNAP db is cached in RAM, use optimal parameters depending on result
		SNAP_db_cached=$(vmtouch -m500G -f "$SNAP_subtraction_db" | grep 'Resident Pages' | awk '{print $5}')
		if [[ "$SNAP_db_cached" == "100%" ]]
		then
			echo -e "$(date)\t$scriptname\tSNAP database is cached ($SNAP_db_cached)."
			SNAP_cache_option=" -map "
		else
			echo -e "$(date)\t$scriptname\tSNAP database is not cached ($SNAP_db_cached)."
			SNAP_cache_option=" -pre -map "
		fi
		echo -e "$(date)\t$scriptname\tParameters: snap-dev single $SNAP_subtraction_db $file_to_subtract -o -sam $subtracted_output_file.$SUBTRACTION_COUNTER.sam -t $cores -x -f -h 250 -d ${d_human} -n 25 -F u $SNAP_cache_option"
		START_SUBTRACTION_STEP=$(date +%s)
		# shellcheck disable=SC2086 -- $SNAP_cache_option intentionally word-splits into flags
		snap-dev single "$SNAP_subtraction_db" "$file_to_subtract" -o -sam "$subtracted_output_file.$SUBTRACTION_COUNTER.sam" -t "$cores" -x -f -h 250 -d "${d_human}" -n 25 -F u $SNAP_cache_option
		END_SUBTRACTION_STEP=$(date +%s)
		echo -e "$(date)\t$scriptname\tDone: SNAP to human"
		diff_SUBTRACTION_STEP=$(( END_SUBTRACTION_STEP - START_SUBTRACTION_STEP ))
		echo -e "$(date)\t$scriptname\tSubtraction step: $SUBTRACTION_COUNTER took $diff_SUBTRACTION_STEP seconds"
		file_to_subtract="$subtracted_output_file.$SUBTRACTION_COUNTER.sam"
	done
	# Reads left unmatched (RNAME "*") by the final round, converted back to FASTQ.
	egrep -v "^@" "$subtracted_output_file.$SUBTRACTION_COUNTER.sam" | awk '{if($3 == "*") print "@"$1"\n"$10"\n""+"$1"\n"$11}' > $(echo "$basef_h".human.snap.unmatched.sam | sed 's/\(.*\)\..*/\1/').fastq
	END_SUBTRACTION=$(date +%s)
	diff_SUBTRACTION=$(( END_SUBTRACTION - START_SUBTRACTION ))
	rm $subtracted_output_file.*.sam
	echo -e "$(date)\t$scriptname\tSubtraction took $diff_SUBTRACTION seconds" | tee -a timing.$basef.log
fi
######dropcache?#############
# Re-sample free memory (GB): field 4 of the second line of `free -g` output
# (the "Mem:" row) and drop the page cache when it has fallen below the cutoff.
freemem=$(free -g | awk 'NR==2 {print $4}')
echo -e "$(date)\t$scriptname\tThere is $freemem GB available free memory...[cutoff=$cache_reset GB]"
if [[ $dropcache == "Y" ]] && [ "$freemem" -lt "$cache_reset" ]
then
	echo -e "$(date)\t$scriptname\tClearing cache..."
	dropcache
fi
############################# SNAP TO NT ##############################
if [ "$alignment" != "skip" ]
then
# Align host-subtracted reads to the NT database with SNAP, unless the result
# SAM already exists (allows pipeline restarts to skip completed work).
if [ ! -f $basef.NT.snap.sam ]
then
echo -e "$(date)\t$scriptname\t####### SNAP UNMATCHED SEQUENCES TO NT ######"
echo -e -n "$(date)\t$scriptname\tCalculating number of sequences to analyze using SNAP to NT: "
# FASTQ records are 4 lines each; count the header lines.
echo $(awk 'NR%4==1' "$basef_h".human.snap.unmatched.fastq | wc -l)
echo -e "$(date)\t$scriptname\tStarting: Mapping by SNAP to NT from $basef_h.human.snap.unmatched.fastq"
START_SNAPNT=$(date +%s)
# SNAP to NT for unmatched reads (d value threshold cutoff = 12)
if [ $run_mode = "Comprehensive" ]
then
if [ $snap_integrator = "inline" ]
then
echo -e "$(date)\t$scriptname\tParameters: snap_nt.sh $basef_h.human.snap.unmatched.fastq ${SNAP_COMPREHENSIVE_db_dir} $cores $cache_reset $d_NT_alignment $snap"
snap_nt.sh "$basef_h.human.snap.unmatched.fastq" "${SNAP_COMPREHENSIVE_db_dir}" "$cores" "$cache_reset" "$d_NT_alignment" "$snap"
elif [ $snap_integrator = "end" ]
then
if [ "$snap_nt_procedure" = "AWS_master_slave" ]
then
# transfer data to slave, start SNAP on each slave, and wait for results
#check if slave_setup is running before progressing to snap_on_slave.sh
#slave_setup should be responsible for verifying that all slaves are properly running.
echo -n -e "$(date)\t$scriptname\tWaiting for slave_setup to complete."
while [ ! -f $file_with_slave_ips ]
do
echo -n "."
sleep 2
done
echo
echo -e "$(date)\t$scriptname\tParameters: snap_on_slave.sh $basef_h.human.snap.unmatched.fastq $pemkey $file_with_slave_ips $incoming_dir ${basef}.NT.snap.sam $d_NT_alignment"
# FIX: pass $d_NT_alignment (the NT edit-distance cutoff) as the d-value.
# Previously this passed $d_human, contradicting both the logged parameter
# line above and the d-value used by the inline/solo/Fast branches.
snap_on_slave.sh "$basef_h.human.snap.unmatched.fastq" "$pemkey" "$file_with_slave_ips" "$incoming_dir" "${basef}.NT.snap.sam" "$d_NT_alignment"> "$basef.AWS.log" 2>&1
elif [ "$snap_nt_procedure" = "solo" ]
then
echo -e "$(date)\t$scriptname\tParameters: snap_nt_combine.sh $basef_h.human.snap.unmatched.fastq ${SNAP_COMPREHENSIVE_db_dir} $cores $cache_reset $d_NT_alignment $num_simultaneous_SNAP_runs"
snap_nt_combine.sh "$basef_h.human.snap.unmatched.fastq" "${SNAP_COMPREHENSIVE_db_dir}" "$cores" "$cache_reset" "$d_NT_alignment" "$num_simultaneous_SNAP_runs"
fi
fi
elif [ $run_mode = "Fast" ]
then
# Fast mode uses the smaller FAST database.
echo -e "$(date)\t$scriptname\tParameters: snap_nt.sh $basef_h.human.snap.unmatched.fastq ${SNAP_FAST_db_dir} $cores $cache_reset $d_NT_alignment $snap"
snap_nt.sh "$basef_h.human.snap.unmatched.fastq" "${SNAP_FAST_db_dir}" "$cores" "$cache_reset" "$d_NT_alignment" "$snap"
fi
echo -e "$(date)\t$scriptname\tDone: SNAP to NT"
END_SNAPNT=$(date +%s)
diff_SNAPNT=$(( END_SNAPNT - START_SNAPNT ))
echo -e "$(date)\t$scriptname\tSNAP to NT took $diff_SNAPNT seconds." | tee -a timing.$basef.log
# Normalize the aligner's output name to the canonical $basef.NT.snap.sam.
mv -f "$basef_h.human.snap.unmatched.NT.sam" "$basef.NT.snap.sam"
fi
echo -e "$(date)\t$scriptname\tStarting: parsing $basef.NT.snap.sam"
echo -e "$(date)\t$scriptname\textract matched/unmatched $basef.NT.snap.sam"
# Split alignment records (header lines dropped) by RNAME: "*" means unmapped.
egrep -v "^@" $basef.NT.snap.sam | awk '{if($3 != "*") print }' > $basef.NT.snap.matched.sam
egrep -v "^@" $basef.NT.snap.sam | awk '{if($3 == "*") print }' > $basef.NT.snap.unmatched.sam
echo -e "$(date)\t$scriptname\tconvert sam to fastq from $basef.NT.snap.sam"
echo -e "$(date)\t$scriptname\tDone: parsing $basef.NT.snap.unmatched.sam"
# Restore full-length read sequences into the matched SAM and annotate each
# record with NCBI taxonomy; skipped on restart if annotation already exists.
if [ ! -f "$basef.NT.snap.matched.all.annotated" ]
then
## convert to FASTQ and retrieve full-length sequences
echo -e "$(date)\t$scriptname\tconvert to FASTQ and retrieve full-length sequences for SNAP NT matched hits"
echo -e "$(date)\t$scriptname\tParameters: extractHeaderFromFastq_ncores.sh $cores $basef.cutadapt.fastq $basef.NT.snap.matched.sam $basef.NT.snap.matched.fulllength.fastq $basef.NT.snap.unmatched.sam $basef.NT.snap.unmatched.fulllength.fastq"
extractHeaderFromFastq_ncores.sh "$cores" "$basef.cutadapt.fastq" "$basef.NT.snap.matched.sam" "$basef.NT.snap.matched.fulllength.fastq" "$basef.NT.snap.unmatched.sam" "$basef.NT.snap.unmatched.fulllength.fastq" #SNN140507
# Rebuild the SAM with full-length SEQ/QUAL columns: sort by read name, split
# off cols 1-9 and 12+ (the cropped SEQ/QUAL in cols 10-11 are replaced), and
# paste the name-sorted full-length sequence+quality between them.
sort -k1,1 "$basef.NT.snap.matched.sam" > "$basef.NT.snap.matched.sorted.sam"
cut -f1-9 "$basef.NT.snap.matched.sorted.sam" > "$basef.NT.snap.matched.sorted.sam.tmp1"
cut -f12- "$basef.NT.snap.matched.sorted.sam" > "$basef.NT.snap.matched.sorted.sam.tmp2" #SNN140507 -f11 -> -f12
awk '(NR%4==1) {printf("%s\t",$0)} (NR%4==2) {printf("%s\t", $0)} (NR%4==0) {printf("%s\n",$0)}' "$basef.NT.snap.matched.fulllength.fastq" | sort -k1,1 | awk '{print $2 "\t" $3}' > "$basef.NT.snap.matched.fulllength.sequence.txt" #SNN140507 change this to bring in quality lines as well
paste "$basef.NT.snap.matched.sorted.sam.tmp1" "$basef.NT.snap.matched.fulllength.sequence.txt" "$basef.NT.snap.matched.sorted.sam.tmp2" > "$basef.NT.snap.matched.fulllength.sam"
###retrieve taxonomy matched to NT ###
echo -e "$(date)\t$scriptname\ttaxonomy retrieval for $basef.NT.snap.matched.fulllength.sam"
echo -e "$(date)\t$scriptname\tParameters: taxonomy_lookup.pl $basef.NT.snap.matched.fulllength.sam sam nucl $cores $taxonomy_db_directory"
taxonomy_lookup.pl "$basef.NT.snap.matched.fulllength.sam" sam nucl $cores $taxonomy_db_directory
sort -k 13.7n "$basef.NT.snap.matched.fulllength.all.annotated" > "$basef.NT.snap.matched.fulllength.all.annotated.sorted" # sam format is no longer disturbed
# FIX: second operand previously misspelled "fullength", so the
# .fulllength.gi.taxonomy intermediate was never actually removed.
rm -f "$basef.NT.snap.matched.fulllength.gi" "$basef.NT.snap.matched.fulllength.gi.taxonomy"
fi
# adjust filenames for FAST mode
# Bucket annotated hits by taxonomic lineage substring.
grep "Viruses;" "$basef.NT.snap.matched.fulllength.all.annotated.sorted" > "$basef.NT.snap.matched.fl.Viruses.annotated"
grep "Bacteria;" "$basef.NT.snap.matched.fulllength.all.annotated.sorted" > "$basef.NT.snap.matched.fl.Bacteria.annotated"
##SNN140507 cleanup bacterial reads
echo -e "$(date)\t$scriptname\tParameters: ribo_snap_bac_euk.sh $basef.NT.snap.matched.fl.Bacteria.annotated BAC $cores $ribo_snap_bac_euk_directory"
ribo_snap_bac_euk.sh $basef.NT.snap.matched.fl.Bacteria.annotated BAC $cores $ribo_snap_bac_euk_directory #SNN140507
if [ $run_mode = "Comprehensive" ]
then
# Successive grep -v filters carve the eukaryote hits into disjoint bins:
# Primates -> other mammals -> other chordates -> other eukaryotes.
grep "Primates;" "$basef.NT.snap.matched.fulllength.all.annotated.sorted" > "$basef.NT.snap.matched.fl.Primates.annotated"
grep -v "Primates" "$basef.NT.snap.matched.fulllength.all.annotated.sorted" | grep "Mammalia" > "$basef.NT.snap.matched.fl.nonPrimMammal.annotated"
grep -v "Mammalia" "$basef.NT.snap.matched.fulllength.all.annotated.sorted" | grep "Chordata" > "$basef.NT.snap.matched.fl.nonMammalChordat.annotated"
grep -v "Chordata" "$basef.NT.snap.matched.fulllength.all.annotated.sorted" | grep "Eukaryota" > "$basef.NT.snap.matched.fl.nonChordatEuk.annotated"
ribo_snap_bac_euk.sh $basef.NT.snap.matched.fl.nonChordatEuk.annotated EUK $cores $ribo_snap_bac_euk_directory
fi
echo -e "$(date)\t$scriptname\tDone taxonomy retrieval"
echo -e "$(date)\t$scriptname\tParameters: table_generator.sh $basef.NT.snap.matched.fl.Viruses.annotated SNAP Y Y Y Y>& $basef.table_generator_snap.matched.fl.log"
table_generator.sh "$basef.NT.snap.matched.fl.Viruses.annotated" SNAP Y Y Y Y>& "$basef.table_generator_snap.matched.fl.log"
if [ $run_mode = "Comprehensive" ]
then
### convert to FASTQ and retrieve full-length sequences to add to unmatched SNAP for viral RAPSearch###
# Annotated rows are SAM-like: rebuild FASTQ records from QNAME/SEQ/QUAL
# (cols 1, 10, 11); the sed strips ".annotated" to name the output .fastq.
egrep -v "^@" "$basef.NT.snap.matched.fl.Viruses.annotated" | awk '{if($3 != "*") print "@"$1"\n"$10"\n""+"$1"\n"$11}' > $(echo "$basef.NT.snap.matched.fl.Viruses.annotated" | sed 's/\(.*\)\..*/\1/').fastq
echo -e "$(date)\t$scriptname\tDone: convert to FASTQ and retrieve full-length sequences for SNAP NT hits "
fi
echo -e "$(date)\t$scriptname\t############# SORTING unmatched to NT BY LENGTH AND UNIQ AND LOOKUP ORIGINAL SEQUENCES #################"
if [ $run_mode = "Comprehensive" ]
then
#SNN 140507 extractHeaderFromFastq.csh "$basef.NT.snap.unmatched.fastq" FASTQ "$basef.cutadapt.fastq" "$basef.NT.snap.unmatched.fulllength.fastq"
# FASTQ -> FASTA: keep lines 1-2 of each 4-line record, replace leading @ with >.
sed "n;n;n;d" "$basef.NT.snap.unmatched.fulllength.fastq" | sed "n;n;d" | sed "s/^@/>/g" > "$basef.NT.snap.unmatched.fulllength.fasta"
fi
# Sort FASTA records by descending sequence length (assumes single-line sequences).
cat "$basef.NT.snap.unmatched.fulllength.fasta" | perl -e 'while (<>) {$h=$_; $s=<>; $seqs{$h}=$s;} foreach $header (reverse sort {length($seqs{$a}) <=> length($seqs{$b})} keys %seqs) {print $header.$seqs{$header}}' > $basef.NT.snap.unmatched.fulllength.sorted.fasta
if [ $run_mode = "Comprehensive" ]
then
echo -e "$(date)\t$scriptname\twe will be using 50 as the length of the cropped read for removing unique and low-complexity reads"
echo -e "$(date)\t$scriptname\tParameters: crop_reads.csh $basef.NT.snap.unmatched.fulllength.sorted.fasta 25 50 > $basef.NT.snap.unmatched.fulllength.sorted.cropped.fasta"
# Crop to positions 25-50, dedupe the cropped reads (gt sequniq), then map the
# surviving headers back to their full-length sequences.
crop_reads.csh "$basef.NT.snap.unmatched.fulllength.sorted.fasta" 25 50 > "$basef.NT.snap.unmatched.fulllength.sorted.cropped.fasta"
echo -e "$(date)\t$scriptname\t*** reads cropped ***"
echo -e "$(date)\t$scriptname\tParameters: gt sequniq -seqit -force -o $basef.NT.snap.unmatched.fulllength.sorted.cropped.uniq.fasta $basef.NT.snap.unmatched.fulllength.sorted.cropped.fasta"
gt sequniq -seqit -force -o "$basef.NT.snap.unmatched.fulllength.sorted.cropped.uniq.fasta" "$basef.NT.snap.unmatched.fulllength.sorted.cropped.fasta"
echo -e "$(date)\t$scriptname\tParameters: extractAlltoFast.sh $basef.NT.snap.unmatched.fulllength.sorted.cropped.uniq.fasta FASTA $basef.NT.snap.unmatched.fulllength.fasta FASTA $basef.NT.snap.unmatched.uniq.fl.fasta FASTA"
extractAlltoFast.sh "$basef.NT.snap.unmatched.fulllength.sorted.cropped.uniq.fasta" FASTA "$basef.NT.snap.unmatched.fulllength.fasta" FASTA "$basef.NT.snap.unmatched.uniq.fl.fasta" FASTA #SNN140507
fi
echo -e "$(date)\t$scriptname\tDone uniquing full length sequences of unmatched to NT"
fi
####################### DENOVO CONTIG ASSEMBLY #####
if [ $run_mode = "Comprehensive" ]
then
echo -e "$(date)\t$scriptname\t######### Running ABYSS and Minimus #########"
START_deNovo=$(date +%s)
echo -e "$(date)\t$scriptname\tAdding matched viruses to NT unmatched"
# FASTQ -> FASTA as above; prefix headers with "Vir" so virus-matched reads
# remain distinguishable inside the assembly input.
sed "n;n;n;d" "$basef.NT.snap.matched.fl.Viruses.fastq" | sed "n;n;d" | sed "s/^@/>/g" | sed 's/>/>Vir/g' > "$basef.NT.snap.matched.fl.Viruses.fasta"
gt sequniq -seqit -force -o "$basef.NT.snap.matched.fl.Viruses.uniq.fasta" "$basef.NT.snap.matched.fl.Viruses.fasta"
# Assembly input = deduped NT-unmatched reads + deduped virus-matched reads.
cat "$basef.NT.snap.unmatched.uniq.fl.fasta" "$basef.NT.snap.matched.fl.Viruses.uniq.fasta" > "$basef.NT.snap.unmatched_addVir_uniq.fasta"
echo -e "$(date)\t$scriptname\tStarting deNovo assembly"
echo -e "$(date)\t$scriptname\tParameters: abyss_minimus.sh $basef.NT.snap.unmatched_addVir_uniq.fasta $length $contigcutoff $cores $abysskmer $ignore_barcodes_for_de_novo"
abyss_minimus.sh "$basef.NT.snap.unmatched_addVir_uniq.fasta" "$length" "$contigcutoff" "$cores" "$abysskmer" "$ignore_barcodes_for_de_novo"
echo -e "$(date)\t$scriptname\tCompleted deNovo assembly: generated all.$basef.NT.snap.unmatched_addVir_uniq.fasta.unitigs.cut${length}.${contigcutoff}-mini.fa"
END_deNovo=$(date +%s)
diff_deNovo=$(( END_deNovo - START_deNovo ))
echo -e "$(date)\t$scriptname\tdeNovo Assembly took $diff_deNovo seconds." | tee -a timing.$basef.log
fi
#######RAPSearch#####
#################### RAPSearch to Vir ###########
# Protein-level search of NT-unmatched reads against the viral RAPSearch
# database. The ifs opened here close beyond this chunk.
if [ $run_mode = "Comprehensive" ]
then
if [ "$rapsearch_database" == "Viral" ]
then
if [ -f "$basef.NT.snap.unmatched.uniq.fl.fasta" ]
then
echo -e "$(date)\t$scriptname\t############# RAPSearch to ${RAPSearch_VIRUS_db} ON NT-UNMATCHED SEQUENCES #################"
# Free the page cache before the memory-hungry RAPSearch run, if enabled.
if [[ $dropcache == "Y" ]]
then
dropcache
fi
echo -e "$(date)\t$scriptname\tStarting: RAPSearch $basef.NT.snap.unmatched.uniq.fl.fasta "
START14=$(date +%s)
# FIX: the logged output name previously said ".RAPSearch.e1" while the actual
# command writes ".RAPSearch.e${ecutoff_Vir}" (as consumed by the .m8
# post-processing below); the log line now reports the real parameters.
echo -e "$(date)\t$scriptname\tParameters: rapsearch -q $basef.NT.snap.unmatched.uniq.fl.fasta -d $RAPSearch_VIRUS_db -o $basef.$rapsearch_database.RAPSearch.e${ecutoff_Vir} -z $cores -e $ecutoff_Vir -v 1 -b 1 -t N >& $basef.$rapsearch_database.RAPSearch.log"
rapsearch -q "$basef.NT.snap.unmatched.uniq.fl.fasta" -d $RAPSearch_VIRUS_db -o $basef.$rapsearch_database.RAPSearch.e${ecutoff_Vir} -z "$cores" -e "$ecutoff_Vir" -v 1 -b 1 -t N >& $basef.$rapsearch_database.RAPSearch.log
echo -e "$(date)\t$scriptname\tDone RAPSearch"
END14=$(date +%s)
diff=$(( END14 - START14 ))
echo -e "$(date)\t$scriptname\tRAPSearch to Vir Took $diff seconds"
echo -e "$(date)\t$scriptname\tStarting: add FASTA sequences to RAPSearch m8 output file "
START15=$(date +%s)
# Strip the '#' comment header lines rapsearch writes to the .m8 table.
sed -i '/^#/d' $basef.$rapsearch_database.RAPSearch.e${ecutoff_Vir}.m8
# seqtk subseq treats the m8 file as a name list (first column = query id),
# pulling the full sequence for every read with a viral protein hit.
seqtk subseq $basef.NT.snap.unmatched.uniq.fl.fasta $basef.$rapsearch_database.RAPSearch.e${ecutoff_Vir}.m8 > $basef.$rapsearch_database.RAPSearch.e${ecutoff_Vir}.m8.fasta
# Sequence lines only (headers removed), for pasting alongside the m8 rows.
sed '/>/d' $basef.$rapsearch_database.RAPSearch.e${ecutoff_Vir}.m8.fasta > $basef.$rapsearch_database.RAPSearch.e${ecutoff_Vir}.m8.fasta.seq