Skip to content
Snippets Groups Projects
Commit 1c3a166b authored by Venkat Malladi's avatar Venkat Malladi
Browse files

Merge branch '2-process_createManifest' into 'develop'

Resolve "process_createManifest"

Closes #2

See merge request !14
parents d499ea11 32aae481
Branches
Tags
2 merge requests!37v0.0.1,!14Resolve "process_createManifest"
Pipeline #5753 passed with stages
in 39 minutes and 15 seconds
......@@ -26,6 +26,15 @@ getData:
- singularity run 'docker://bicf/gudmaprbkfilexfer:1.3' sh ./workflow/scripts/bdbagFetch.sh Replicate_16-1ZX4 16-1ZX4
- pytest -m getData
parseMetadata:
  stage: unit
  script:
    # One invocation per parameter that parseMeta.py knows how to extract
    # (repRID, endsMeta, endsManual, stranded, spike, species). The previous
    # job used "-p ends" and "-p specie", which match no branch in
    # parseMeta.py and therefore tested nothing; "spike" was never exercised.
    - singularity run 'docker://bicf/python3:1.3' python3 ./workflow/scripts/parseMeta.py -r Replicate_RID -m "./test_data/meta/metaTest.csv" -p repRID
    - singularity run 'docker://bicf/python3:1.3' python3 ./workflow/scripts/parseMeta.py -r Replicate_RID -m "./test_data/meta/metaTest.csv" -p endsMeta
    - singularity run 'docker://bicf/python3:1.3' python3 ./workflow/scripts/parseMeta.py -r Replicate_RID -m "./test_data/meta/metaTest.csv" -p endsManual
    - singularity run 'docker://bicf/python3:1.3' python3 ./workflow/scripts/parseMeta.py -r Replicate_RID -m "./test_data/meta/metaTest.csv" -p stranded
    - singularity run 'docker://bicf/python3:1.3' python3 ./workflow/scripts/parseMeta.py -r Replicate_RID -m "./test_data/meta/metaTest.csv" -p spike
    - singularity run 'docker://bicf/python3:1.3' python3 ./workflow/scripts/parseMeta.py -r Replicate_RID -m "./test_data/meta/metaTest.csv" -p species
trimData:
stage: unit
script:
......
......@@ -13,14 +13,7 @@ process {
cpus = 1
memory = '1 GB'
withName:getBag {
container = 'bicf/gudmaprbkfilexfer:1.3'
}
withName:getData {
container = 'bicf/gudmaprbkfilexfer:1.3'
}
withName:trimData {
container = 'bicf/trimgalore:1.1'
cpus = 15
}
}
\ No newline at end of file
......@@ -13,14 +13,7 @@ process {
cpus = 1
memory = '1 GB'
withName:getBag {
container = 'bicf/gudmaprbkfilexfer:1.3'
}
withName:getData {
container = 'bicf/gudmaprbkfilexfer:1.3'
}
withName:trimData {
container = 'bicf/trimgalore:1.1'
cpus = 15
}
}
......@@ -5,16 +5,16 @@ process {
withName:getBag {
executor = 'local'
container = 'docker://bicf/gudmaprbkfilexfer:1.3'
}
withName:getData {
executor = 'local'
container = 'docker://bicf/gudmaprbkfilexfer:1.3'
}
withName:trimData {
container = 'docker://bicf/trimgalore:1.1'
queue = '256GB,256GBv1,384GB'
}
withName:parseMetadata {
executor = 'local'
}
}
singularity {
......
......@@ -10,6 +10,21 @@ profiles {
}
}
// Per-process container selection for this profile.
process {
// bag export and data fetch share the file-transfer image
withName:getBag {
container = 'bicf/gudmaprbkfilexfer:1.3'
}
withName:getData {
container = 'bicf/gudmaprbkfilexfer:1.3'
}
// adapter trimming runs in the trim_galore image
withName:trimData {
container = 'bicf/trimgalore:1.1'
}
// metadata parsing only needs python3 + pandas
withName:parseMetadata {
container = 'bicf/python3:1.3'
}
}
trace {
enabled = true
file = 'pipeline_trace.txt'
......
#!/usr/bin/env nextflow
// Define input variables
params.deriva = "${baseDir}/../test_data/credential.json"
params.bdbag = "${baseDir}/../test_data/cookies.txt"
params.deriva = "${baseDir}/../test_data/auth/credential.json"
params.bdbag = "${baseDir}/../test_data/auth/cookies.txt"
//params.repRID = "16-1ZX4"
params.repRID = "Q-Y5JA"
......@@ -25,6 +25,7 @@ derivaConfig = Channel.fromPath("${baseDir}/conf/replicate_export_config.json")
// Define script files
script_bdbagFetch = Channel.fromPath("${baseDir}/scripts/bdbagFetch.sh")
script_parseMeta = Channel.fromPath("${baseDir}/scripts/parseMeta.py")
/*
* getData: get bagit file from consortium
......@@ -75,7 +76,6 @@ process getData {
file("**/Experiment.csv") into experimentMeta
file ("${repRID}.getData.err")
script:
"""
hostname >>${repRID}.getData.err
......@@ -95,11 +95,67 @@ process getData {
echo "LOG: replicate bdbag unzipped" >>${repRID}.getData.err
# bagit fetch fastq's only and rename by repRID
sh bdbagFetch.sh \${replicate} ${repRID} 2>>${repRID}.getData.err
sh ${script_bdbagFetch} \${replicate} ${repRID} 2>>${repRID}.getData.err
echo "LOG: replicate bdbag fetched" >>${repRID}.getData.err
"""
}
/*
* parseMetadata: parses metadata to extract experiment parameters
*/
process parseMetadata {
tag "${repRID}"
publishDir "${logsDir}", mode: 'copy', pattern: "${repRID}.parseMetadata.err"

// Inputs: the parseMeta.py helper, the replicate RID, and the three
// metadata CSVs staged by getData (File.csv, Experiment Settings.csv,
// Experiment.csv).
input:
path script_parseMeta
val repRID
path fileMeta
path experimentSettingsMeta
path experimentMeta

// Output: a single-line CSV of the derived run parameters, in the order
// rep,endsMeta,endsManual,stranded,spike,species (see the final echo below).
output:
path 'design.csv' into metadata

script:
"""
hostname >>${repRID}.parseMetadata.err
ulimit -a >>${repRID}.parseMetadata.err

# Check replicate RID metadata
rep=\$(python3 ${script_parseMeta} -r ${repRID} -m "${fileMeta}" -p repRID)
echo "LOG: replicate RID metadata parsed: \${rep}" >>${repRID}.parseMetadata.err

# Get endedness metadata
endsMeta=\$(python3 ${script_parseMeta} -r ${repRID} -m "${experimentSettingsMeta}" -p endsMeta)
echo "LOG: endedness metadata parsed: \${endsMeta}" >>${repRID}.parseMetadata.err

# Manually get endness
endsManual=\$(python3 ${script_parseMeta} -r ${repRID} -m "${fileMeta}" -p endsManual)
echo "LOG: endedness manually detected: \${endsManual}" >>${repRID}.parseMetadata.err

# Get strandedness metadata
stranded=\$(python3 ${script_parseMeta} -r ${repRID} -m "${experimentSettingsMeta}" -p stranded)
echo "LOG: strandedness metadata parsed: \${stranded}" >>${repRID}.parseMetadata.err

# Get spike-in metadata
spike=\$(python3 ${script_parseMeta} -r ${repRID} -m "${experimentSettingsMeta}" -p spike)
echo "LOG: spike-in metadata parsed: \${spike}" >>${repRID}.parseMetadata.err

# Get species metadata
species=\$(python3 ${script_parseMeta} -r ${repRID} -m "${experimentMeta}" -p species)
echo "LOG: species metadata parsed: \${species}" >>${repRID}.parseMetadata.err

# Save design file
echo "\${rep},\${endsMeta},\${endsManual},\${stranded},\${spike},\${species}" > design.csv
"""
}
// Split the one-row design.csv into its fields and duplicate the channel
// for each downstream consumer (trimData and QC).
metadata.splitCsv(sep: ',', header: false).into {
metadata_trimData
metadata_qc
}
/*
* trimData: trims any adapter or non-host sequences from the data
*/
......@@ -109,10 +165,10 @@ process trimData {
input:
file(fastq) from fastqs
tuple val(rep), val(endsMeta), val(endsManual), val(stranded), val(spike), val(species) from metadata_trimData
output:
path ("*.fq.gz") into fastqs_trimmed
val ends
file ("${repRID}.trimData.log")
file ("${repRID}.trimData.err")
......@@ -122,12 +178,10 @@ process trimData {
ulimit -a >>${repRID}.trimData.err
# trim fastqs
if [ '${fastq[1]}' == 'null' ]
if [ '${endsManual}' == 'se' ]
then
ends='se'
trim_galore --gzip -q 25 --illumina --length 35 --basename ${repRID} -j `nproc` ${fastq[0]} 1>>${repRID}.trimData.log 2>>${repRID}.trimData.err;
else
ends='pe'
trim_galore --gzip -q 25 --illumina --length 35 --paired --basename ${repRID} -j `nproc` ${fastq[0]} ${fastq[1]} 1>>${repRID}.trimData.log 2>>${repRID}.trimData.err;
fi
"""
......
#!/usr/bin/env python3

import argparse
import re

import pandas as pd


def get_args():
    """Parse command-line arguments (-f/--files: path to the unzipped bdbag)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--files', help="The fetch file from bdgap.zip.", required=True)
    args = parser.parse_args()
    return args


def main():
    """Rewrite <bag>/fetch.txt so fastq entries are named by Replicate_RID.

    Reads the bagit fetch manifest (tab-separated, no header; column 2 is
    the local file path) and the bag's data/File.csv metadata, then replaces
    each fastq file-name stem with its owning Replicate_RID.  Only fastq
    rows are kept in the rewritten fetch.txt (same as the original filter).
    """
    args = get_args()
    fetchFile = pd.read_csv(args.files + "/fetch.txt", sep="\t", header=None)
    fileFile = pd.read_csv(args.files + "/data/File.csv", sep=",", header=0)
    fileFile_filtered = fileFile[fileFile["File_Type"] == "FastQ"]
    # Keep only fetch entries whose target path ends in ".fastq.gz".
    # .copy() so the .loc writes below hit a real frame, not a view
    # (the original chained assignment triggered SettingWithCopy and could
    # silently fail to propagate).
    fetchFile_filtered = fetchFile[fetchFile[2].str[-9:] == ".fastq.gz"].copy()
    for fileName in fileFile_filtered["File_Name"]:
        match = fetchFile_filtered[2].str.contains(fileName, regex=False)
        if not match.any():
            # No fetch entry for this metadata row; the original would have
            # crashed on .values[0] here.
            continue
        # Strip the ".R1/.R2.fastq.gz" suffix to get the stem to replace
        # (raw string: the original's "\." escapes are invalid in plain
        # string literals on modern Python).
        stem = re.sub(r"\.R.\.fastq\.gz", "", fileName)
        repRID = fileFile_filtered.loc[fileFile_filtered["File_Name"] == fileName, "Replicate_RID"].values[0]
        # As in the original, the first matching path (renamed) is assigned
        # to every row that matched this file name.
        fetchFile_filtered.loc[match, 2] = fetchFile_filtered.loc[match, 2].values[0].replace(stem, repRID)
    fetchFile_filtered.to_csv(args.files + "/fetch.txt", sep="\t", header=False, index=False)


if __name__ == '__main__':
    main()
\ No newline at end of file
#!/usr/bin/env python3

import argparse
import warnings

import pandas as pd

warnings.simplefilter(action='ignore', category=FutureWarning)


def get_args():
    """Parse command-line arguments: replicate RID, metadata CSV, parameter name."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--repRID', help="The replicate RID.", required=True)
    parser.add_argument('-m', '--metaFile', help="The metadata file to extract.", required=True)
    parser.add_argument('-p', '--parameter', help="The parameter to extract.", required=True)
    args = parser.parse_args()
    return args


def main():
    """Extract one run parameter from a metadata CSV and print it to stdout.

    Exits non-zero with a diagnostic when the metadata is inconsistent with
    the requested replicate or outside the expected value sets.  All error
    messages are built from Python strings (the original concatenated str
    with numpy arrays, which raises TypeError instead of printing).
    """
    args = get_args()
    metaFile = pd.read_csv(args.metaFile, sep=",", header=0)

    # Check replicate RID metadata from 'File.csv'
    if (args.parameter == "repRID"):
        uniqueRIDs = metaFile.Replicate_RID.unique()
        if (len(uniqueRIDs) > 1):
            print("There are multiple replicate RID's in the metadata: " + " ".join(uniqueRIDs))
            exit(1)
        if (uniqueRIDs[0] != args.repRID):
            # str() the scalar so message building cannot fail on numpy types
            print("Replicate RID in metadata does not match run parameters: " + str(uniqueRIDs[0]) + " vs " + args.repRID)
            exit(1)
        fastqs = metaFile[metaFile["File_Type"] == "FastQ"]
        if (len(fastqs) > 2):
            print("There are more than 2 fastq's in the metadata: " + " ".join(fastqs.RID))
            exit(1)
        print(uniqueRIDs[0])

    # Get endedness metadata from 'Experiment Settings.csv'
    if (args.parameter == "endsMeta"):
        pairedEnd = metaFile.Paired_End.unique()
        if (len(pairedEnd) == 1 and pairedEnd[0] == "Single End"):
            endsMeta = "se"
        elif (len(pairedEnd) == 1 and pairedEnd[0] == "Paired End"):
            endsMeta = "pe"
        else:
            # unknown or inconsistent endedness
            endsMeta = "uk"
        print(endsMeta)

    # Manually detect endedness from the fastq count in 'File.csv'
    if (args.parameter == "endsManual"):
        fastqCount = len(metaFile[metaFile["File_Type"] == "FastQ"])
        if (fastqCount == 1):
            endsManual = "se"
        elif (fastqCount == 2):
            endsManual = "pe"
        else:
            # the original left endsManual unbound here and died with a
            # NameError; fail with a clear message instead
            print("Unexpected number of fastq's in the metadata: " + str(fastqCount))
            exit(1)
        print(endsManual)

    # Get strandedness metadata from 'Experiment Settings.csv'
    if (args.parameter == "stranded"):
        strandedMeta = metaFile.Has_Strand_Specific_Information.unique()
        if (len(strandedMeta) == 1 and strandedMeta[0] == "yes"):
            stranded = "stranded"
        elif (len(strandedMeta) == 1 and strandedMeta[0] == "no"):
            stranded = "unstranded"
        else:
            print("Stranded metadata not match expected options: " + " ".join(str(v) for v in strandedMeta))
            exit(1)
        print(stranded)

    # Get spike-in metadata from 'Experiment Settings.csv'
    if (args.parameter == "spike"):
        spikeMeta = metaFile.Used_Spike_Ins.unique()
        if (len(spikeMeta) == 1 and spikeMeta[0] == "yes"):
            spike = "yes"
        elif (len(spikeMeta) == 1 and spikeMeta[0] == "no"):
            spike = "no"
        else:
            print("Spike-ins metadata not match expected options: " + " ".join(str(v) for v in spikeMeta))
            exit(1)
        print(spike)

    # Get species metadata from 'Experiment.csv'
    if (args.parameter == "species"):
        speciesMeta = metaFile.Species.unique()
        if (len(speciesMeta) == 1 and speciesMeta[0] == "Mus musculus"):
            species = "Mus musculus"
        elif (len(speciesMeta) == 1 and speciesMeta[0] == "Homo sapiens"):
            species = "Homo sapiens"
        else:
            print("Species metadata not match expected options: " + " ".join(str(v) for v in speciesMeta))
            exit(1)
        print(species)


if __name__ == '__main__':
    main()
\ No newline at end of file
#!/usr/bin/env python3

import argparse
import os
import re

import pandas as pd


def get_args():
    """Parse command-line arguments (-f/--files: path to the unzipped bdbag)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--files', help="The fetch file from bdgap.zip.", required=True)
    args = parser.parse_args()
    return args


def main():
    """Split <bag>/fetch.txt into one manifest per replicate.

    For every Replicate_RID in data/File.csv, writes the fetch entries whose
    URL matches that replicate's fastq URIs to Replicate_<RID>/fetch.txt
    (directory created relative to the current working directory).
    """
    args = get_args()
    fetchFile = pd.read_csv(args.files + "/fetch.txt", sep="\t", header=None)
    fileFile = pd.read_csv(args.files + "/data/File.csv", sep=",", header=0)
    replicateRID = fileFile.Replicate_RID.unique()
    # Replicate RID -> Series of fastq URIs belonging to that replicate
    fetchArray = {i: fileFile.URI[(fileFile.Replicate_RID == i) & (fileFile.File_Type == "FastQ")] for i in replicateRID}
    for i in replicateRID:
        # BUG FIX: the original tested os.path.exists(i) but created
        # "Replicate_"+i, so a rerun (or a stray directory named after the
        # bare RID) broke the split; makedirs(exist_ok=True) covers both.
        outDir = "Replicate_" + i
        os.makedirs(outDir, exist_ok=True)
        # Escape the URIs so '.' and friends match literally instead of as
        # regex metacharacters in str.contains.
        pattern = '|'.join(re.escape(uri) for uri in fetchArray[i])
        fetchFile[fetchFile[0].str.contains(pattern)].to_csv(outDir + "/fetch.txt", sep="\t", header=False, index=False)


if __name__ == '__main__':
    main()
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment