From 2ec44620f03bce997668b349f9d346afbfd03ed8 Mon Sep 17 00:00:00 2001
From: "Gervaise H. Henry" <gervaise.henry@utsouthwestern.edu>
Date: Mon, 14 Sep 2020 12:17:12 -0500
Subject: [PATCH] Auto pep8'd all python code

---
 workflow/scripts/extractRefData.py       | 10 +++--
 workflow/scripts/parseMeta.py            | 51 +++++++++++++++---------
 workflow/scripts/splitStudy.py           | 13 ++++--
 workflow/scripts/tinHist.py              | 47 +++++++++++++---------
 workflow/scripts/utils.py                | 25 +++++-------
 workflow/tests/test_alignReads.py        | 20 ++++++----
 workflow/tests/test_consistency.py       | 17 +++++---
 workflow/tests/test_dataQC.py            | 10 +++--
 workflow/tests/test_dedupReads.py        | 26 +++++++-----
 workflow/tests/test_downsampleData.py    |  5 ++-
 workflow/tests/test_fastqc.py            |  6 ++-
 workflow/tests/test_getBag.py            |  6 ++-
 workflow/tests/test_getData.py           |  9 +++--
 workflow/tests/test_inferMetadata.py     |  7 ++--
 workflow/tests/test_makeBigWig.py        |  2 +-
 workflow/tests/test_makeFeatureCounts.py | 11 +++--
 workflow/tests/test_outputBag.py         |  3 +-
 workflow/tests/test_parseMetadata.py     |  4 +-
 workflow/tests/test_trimData.py          | 12 ++++--
 19 files changed, 176 insertions(+), 108 deletions(-)

diff --git a/workflow/scripts/extractRefData.py b/workflow/scripts/extractRefData.py
index fb3668e..bf06c95 100644
--- a/workflow/scripts/extractRefData.py
+++ b/workflow/scripts/extractRefData.py
@@ -5,15 +5,18 @@ import pandas as pd
 import warnings
 warnings.simplefilter(action='ignore', category=FutureWarning)
 
+
 def get_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-r', '--returnParam',help="The parameter to return (URL or MD5).",required=True)
+    parser.add_argument('-r', '--returnParam',
+                        help="The parameter to return (URL or MD5).", required=True)
     args = parser.parse_args()
     return args
 
+
 def main():
     args = get_args()
-    refQuery=pd.read_json("refQuery.json")
+    refQuery = pd.read_json("refQuery.json")
     if refQuery["File_URL"].count() == 1:
         if args.returnParam == "URL":
             print(refQuery["File_URL"].values[0])
@@ -23,7 +26,8 @@ def main():
             print(refQuery["File_MD5"].values[0])
     else:
         raise Exception("Multple references found: \n%s" %
-            refQuery["RID"])
+                        refQuery["RID"])
+
 
 if __name__ == '__main__':
     main()
diff --git a/workflow/scripts/parseMeta.py b/workflow/scripts/parseMeta.py
index 5000542..16411df 100644
--- a/workflow/scripts/parseMeta.py
+++ b/workflow/scripts/parseMeta.py
@@ -5,52 +5,61 @@ import pandas as pd
 import warnings
 warnings.simplefilter(action='ignore', category=FutureWarning)
 
+
 def get_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-r', '--repRID',help="The replicate RID.",required=True)
-    parser.add_argument('-m', '--metaFile',help="The metadata file to extract.",required=True)
-    parser.add_argument('-p', '--parameter',help="The parameter to extract.",required=True)
+    parser.add_argument(
+        '-r', '--repRID', help="The replicate RID.", required=True)
+    parser.add_argument('-m', '--metaFile',
+                        help="The metadata file to extract.", required=True)
+    parser.add_argument('-p', '--parameter',
+                        help="The parameter to extract.", required=True)
     args = parser.parse_args()
     return args
 
 
 def main():
     args = get_args()
-    metaFile = pd.read_csv(args.metaFile,sep=",",header=0)
+    metaFile = pd.read_csv(args.metaFile, sep=",", header=0)
 
     # Check replicate RID metadata from 'File.csv'
     if (args.parameter == "repRID"):
         if (len(metaFile.Replicate_RID.unique()) > 1):
-            print("There are multiple replicate RID's in the metadata: " + " ".join(metaFile.Replicate_RID.unique()))
+            print("There are multiple replicate RID's in the metadata: " +
+                  " ".join(metaFile.Replicate_RID.unique()))
             exit(1)
         if not (metaFile.Replicate_RID.unique() == args.repRID):
-            print("Replicate RID in metadata does not match run parameters: " + metaFile.Replicate_RID.unique() + " vs " + args.repRID)
+            print("Replicate RID in metadata does not match run parameters: " +
+                  metaFile.Replicate_RID.unique() + " vs " + args.repRID)
             exit(1)
         else:
-            rep=metaFile["Replicate_RID"].unique()[0]
+            rep = metaFile["Replicate_RID"].unique()[0]
             print(rep)
         if (len(metaFile[metaFile["File_Type"] == "FastQ"]) > 2):
-            print("There are more then 2 fastq's in the metadata: " + " ".join(metaFile[metaFile["File_Type"] == "FastQ"].RID))
+            print("There are more than 2 fastq's in the metadata: " +
+                  " ".join(metaFile[metaFile["File_Type"] == "FastQ"].RID))
             exit(1)
 
     # Check experiment RID metadata from 'Experiment.csv'
     if (args.parameter == "expRID"):
         if (len(metaFile.Experiment_RID.unique()) > 1):
-            print("There are multiple experoment RID's in the metadata: " + " ".join(metaFile.Experiment_RID.unique()))
+            print("There are multiple experiment RID's in the metadata: " +
+                  " ".join(metaFile.Experiment_RID.unique()))
             exit(1)
         else:
-            exp=metaFile["Experiment_RID"].unique()[0]
+            exp = metaFile["Experiment_RID"].unique()[0]
             print(exp)
 
     # Check study RID metadata from 'Experiment.csv'
     if (args.parameter == "studyRID"):
         if (len(metaFile.Study_RID.unique()) > 1):
-            print("There are multiple study RID's in the metadata: " + " ".join(metaFile.Study_RID.unique()))
+            print("There are multiple study RID's in the metadata: " +
+                  " ".join(metaFile.Study_RID.unique()))
             exit(1)
         else:
-            study=metaFile["Study_RID"].unique()[0]
+            study = metaFile["Study_RID"].unique()[0]
             print(study)
-    
+
     # Get endedness metadata from 'Experiment Settings.csv'
     if (args.parameter == "endsMeta"):
         if (metaFile.Paired_End.unique() == "Single End"):
@@ -60,7 +69,7 @@ def main():
         else:
             endsMeta = "uk"
         print(endsMeta)
-    
+
     # Manually get endness count from 'File.csv'
     if (args.parameter == "endsManual"):
         if (len(metaFile[metaFile["File_Type"] == "FastQ"]) == 1):
@@ -68,7 +77,7 @@ def main():
         elif (len(metaFile[metaFile["File_Type"] == "FastQ"]) == 2):
             endsManual = "pe"
         print(endsManual)
-    
+
     # Get strandedness metadata from 'Experiment Settings.csv'
     if (args.parameter == "stranded"):
         if (metaFile.Has_Strand_Specific_Information.unique() == "yes"):
@@ -76,10 +85,11 @@ def main():
         elif (metaFile.Has_Strand_Specific_Information.unique() == "no"):
             stranded = "unstranded"
         else:
-            print("Stranded metadata not match expected options: " + metaFile.Has_Strand_Specific_Information.unique())
+            print("Stranded metadata does not match expected options: " +
+                  metaFile.Has_Strand_Specific_Information.unique())
             exit(1)
         print(stranded)
-    
+
     # Get spike-in metadata from 'Experiment Settings.csv'
     if (args.parameter == "spike"):
         if (metaFile.Used_Spike_Ins.unique() == "yes"):
@@ -87,7 +97,8 @@ def main():
         elif (metaFile.Used_Spike_Ins.unique() == "no"):
             spike = "no"
         else:
-            print("Spike-ins metadata not match expected options: " + metaFile.Used_Spike_Ins.unique())
+            print("Spike-ins metadata does not match expected options: " +
+                  metaFile.Used_Spike_Ins.unique())
             exit(1)
         print(spike)
 
@@ -98,7 +109,8 @@ def main():
         elif (metaFile.Species.unique() == "Homo sapiens"):
             species = "Homo sapiens"
         else:
-            print("Species metadata not match expected options: " + metaFile.Species.unique())
+            print("Species metadata does not match expected options: " +
+                  metaFile.Species.unique())
             exit(1)
         print(species)
 
@@ -107,5 +119,6 @@ def main():
         readLength = metaFile.Read_Length.unique()
         print(str(readLength).strip('[]'))
 
+
 if __name__ == '__main__':
     main()
diff --git a/workflow/scripts/splitStudy.py b/workflow/scripts/splitStudy.py
index 82ffc28..bf1129e 100644
--- a/workflow/scripts/splitStudy.py
+++ b/workflow/scripts/splitStudy.py
@@ -5,20 +5,25 @@ import pandas as pd
 import warnings
 warnings.simplefilter(action='ignore', category=FutureWarning)
 
+
 def get_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-s', '--studyRID',help="The study RID.",required=True)
+    parser.add_argument('-s', '--studyRID',
+                        help="The study RID.", required=True)
     args = parser.parse_args()
     return args
 
+
 def main():
     args = get_args()
-    studyRID=pd.read_json(args.studyRID+"_studyRID.json")
+    studyRID = pd.read_json(args.studyRID+"_studyRID.json")
     if studyRID["RID"].count() > 0:
-        studyRID["RID"].to_csv(args.studyRID+"_studyRID.csv",header=False,index=False)
+        studyRID["RID"].to_csv(
+            args.studyRID+"_studyRID.csv", header=False, index=False)
     else:
         raise Exception("No associated replicates found: %s" %
-            studyRID)
+                        studyRID)
+
 
 if __name__ == '__main__':
     main()
diff --git a/workflow/scripts/tinHist.py b/workflow/scripts/tinHist.py
index 3d292c2..a95a9c2 100644
--- a/workflow/scripts/tinHist.py
+++ b/workflow/scripts/tinHist.py
@@ -6,38 +6,47 @@ import numpy as np
 import warnings
 warnings.simplefilter(action='ignore', category=FutureWarning)
 
+
 def get_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-r', '--repRID',help="The replicate RID.",required=True)
+    parser.add_argument(
+        '-r', '--repRID', help="The replicate RID.", required=True)
     args = parser.parse_args()
     return args
 
+
 def main():
     args = get_args()
-    tin = pd.read_csv(args.repRID + '.sorted.deduped.tin.xls',sep="\t",header=0)
-    
-    hist = pd.cut(tin['TIN'],bins=pd.interval_range(start=0,freq=10,end=100,closed='right')).value_counts(sort=False)
+    tin = pd.read_csv(args.repRID + '.sorted.deduped.tin.xls',
+                      sep="\t", header=0)
+
+    hist = pd.cut(tin['TIN'], bins=pd.interval_range(
+        start=0, freq=10, end=100, closed='right')).value_counts(sort=False)
     labels = ["{0} - {1}".format(i, i + 9) for i in range(1, 100, 10)]
     #labels[0] = '0 - 10'
-    binned = tin.assign(Bins=lambda x: pd.cut(tin['TIN'],range(0,105,10),labels=labels,include_lowest=False,right=True))
-    binned['chrom'] = binned['chrom'] = binned['chrom'].replace('chr1','chr01')
-    binned['chrom'] = binned['chrom'].replace('chr2','chr02')
-    binned['chrom'] = binned['chrom'].replace('chr3','chr03')
-    binned['chrom'] = binned['chrom'].replace('chr4','chr04')
-    binned['chrom'] = binned['chrom'].replace('chr5','chr05')
-    binned['chrom'] = binned['chrom'].replace('chr6','chr06')
-    binned['chrom'] = binned['chrom'].replace('chr7','chr07')
-    binned['chrom'] = binned['chrom'].replace('chr8','chr08')
-    binned['chrom'] = binned['chrom'].replace('chr9','chr09')
-    hist = pd.pivot_table(binned, values='geneID', index = 'Bins', columns = 'chrom', aggfunc=np.size)
+    binned = tin.assign(Bins=lambda x: pd.cut(tin['TIN'], range(
+        0, 105, 10), labels=labels, include_lowest=False, right=True))
+    binned['chrom'] = binned['chrom'] = binned['chrom'].replace(
+        'chr1', 'chr01')
+    binned['chrom'] = binned['chrom'].replace('chr2', 'chr02')
+    binned['chrom'] = binned['chrom'].replace('chr3', 'chr03')
+    binned['chrom'] = binned['chrom'].replace('chr4', 'chr04')
+    binned['chrom'] = binned['chrom'].replace('chr5', 'chr05')
+    binned['chrom'] = binned['chrom'].replace('chr6', 'chr06')
+    binned['chrom'] = binned['chrom'].replace('chr7', 'chr07')
+    binned['chrom'] = binned['chrom'].replace('chr8', 'chr08')
+    binned['chrom'] = binned['chrom'].replace('chr9', 'chr09')
+    hist = pd.pivot_table(binned, values='geneID',
+                          index='Bins', columns='chrom', aggfunc=np.size)
     hist['TOTAL'] = hist.sum(axis=1)
-    hist = hist[['TOTAL'] + [ i for i in hist.columns if i != 'TOTAL']]
+    hist = hist[['TOTAL'] + [i for i in hist.columns if i != 'TOTAL']]
     hist = hist.T.fillna(0.0).astype(int)
     #hist = hist.apply(lambda x: x/x.sum()*100, axis=1)
-    hist.to_csv(args.repRID + '.tin.hist.tsv',sep='\t')
-    medFile = open(args.repRID + '.tin.med.csv',"w")
-    medFile.write(str(round(tin['TIN'][(tin['TIN']!=0)].median(),2)))
+    hist.to_csv(args.repRID + '.tin.hist.tsv', sep='\t')
+    medFile = open(args.repRID + '.tin.med.csv', "w")
+    medFile.write(str(round(tin['TIN'][(tin['TIN'] != 0)].median(), 2)))
     medFile.close()
 
+
 if __name__ == '__main__':
     main()
diff --git a/workflow/scripts/utils.py b/workflow/scripts/utils.py
index 5e4478e..548b84c 100644
--- a/workflow/scripts/utils.py
+++ b/workflow/scripts/utils.py
@@ -1,14 +1,5 @@
 #!/usr/bin/env python3
 
-#
-# * --------------------------------------------------------------------------
-# * Licensed under MIT (https://git.biohpc.swmed.edu/BICF/Astrocyte/chipseq_analysis/LICENSE.md)
-# * --------------------------------------------------------------------------
-#
-
-'''General utilities.'''
-
-
 import shlex
 import logging
 import subprocess
@@ -32,7 +23,8 @@ def run_pipe(steps, outfile=None):
         if n == first_step_n:
             if n == last_step_n and outfile:  # one-step pipeline with outfile
                 with open(outfile, 'w') as fh:
-                    print("one step shlex: %s to file: %s" % (shlex.split(step), outfile))
+                    print("one step shlex: %s to file: %s" %
+                          (shlex.split(step), outfile))
                     p = Popen(shlex.split(step), stdout=fh)
                 break
             print("first step shlex to stdout: %s" % (shlex.split(step)))
@@ -40,12 +32,14 @@ def run_pipe(steps, outfile=None):
             p = Popen(shlex.split(step), stdout=PIPE)
         elif n == last_step_n and outfile:  # only treat the last step specially if you're sending stdout to a file
             with open(outfile, 'w') as fh:
-                print("last step shlex: %s to file: %s" % (shlex.split(step), outfile))
+                print("last step shlex: %s to file: %s" %
+                      (shlex.split(step), outfile))
                 p_last = Popen(shlex.split(step), stdin=p.stdout, stdout=fh)
                 p.stdout.close()
                 p = p_last
         else:  # handles intermediate steps and, in the case of a pipe to stdout, the last step
-            print("intermediate step %d shlex to stdout: %s" % (n, shlex.split(step)))
+            print("intermediate step %d shlex to stdout: %s" %
+                  (n, shlex.split(step)))
             p_next = Popen(shlex.split(step), stdin=p.stdout, stdout=PIPE)
             p.stdout.close()
             p = p_next
@@ -54,7 +48,8 @@ def run_pipe(steps, outfile=None):
 
 
 def block_on(command):
-    process = subprocess.Popen(shlex.split(command), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
+    process = subprocess.Popen(shlex.split(
+        command), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
     for line in iter(process.stdout.readline, b''):
         sys.stdout.write(line.decode('utf-8'))
     process.communicate()
@@ -77,7 +72,7 @@ def count_lines(filename):
         "compress",
         "bzip2",
         "gzip"
-        ]
+    ]
     mime_type = mimetypes.guess_type(filename)[1]
     if mime_type in compressed_mimetypes:
         catcommand = 'gzip -dc'
@@ -86,7 +81,7 @@ def count_lines(filename):
     out, err = run_pipe([
         '%s %s' % (catcommand, filename),
         'wc -l'
-        ])
+    ])
     return int(out)
 
 
diff --git a/workflow/tests/test_alignReads.py b/workflow/tests/test_alignReads.py
index eae8780..11f0f3d 100644
--- a/workflow/tests/test_alignReads.py
+++ b/workflow/tests/test_alignReads.py
@@ -6,18 +6,24 @@ import os
 import utils
 
 data_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-	'/../../'
+    '/../../'
 
 
 @pytest.mark.alignData
 def test_alignData_se():
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.unal.gz'))
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.bam'))
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.bam.bai'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.unal.gz'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.bam'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.bam.bai'))
 
 
 @pytest.mark.alignData
 def test_alignData_pe():
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.pe.unal.gz'))
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.pe.sorted.bam'))
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.pe.sorted.bam.bai'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.pe.unal.gz'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.pe.sorted.bam'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.pe.sorted.bam.bai'))
diff --git a/workflow/tests/test_consistency.py b/workflow/tests/test_consistency.py
index 073b128..4999148 100644
--- a/workflow/tests/test_consistency.py
+++ b/workflow/tests/test_consistency.py
@@ -6,19 +6,24 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.consistencySE
 def test_consistencySE():
-    assert os.path.exists(os.path.join(test_output_path, 'SE_multiqc_data.json'))
-    assert readAssigned("assignedSE.txt","assignedExpectSE.txt")
+    assert os.path.exists(os.path.join(
+        test_output_path, 'SE_multiqc_data.json'))
+    assert readAssigned("assignedSE.txt", "assignedExpectSE.txt")
+
 
 @pytest.mark.consistencyPE
 def test_consistencyPE():
-    assert os.path.exists(os.path.join(test_output_path, 'PE_multiqc_data.json'))
-    assert readAssigned("assignedPE.txt","assignedExpectPE.txt")
+    assert os.path.exists(os.path.join(
+        test_output_path, 'PE_multiqc_data.json'))
+    assert readAssigned("assignedPE.txt", "assignedExpectPE.txt")
+
 
-def readAssigned(fileAssigned,fileExpectAssigned):
+def readAssigned(fileAssigned, fileExpectAssigned):
     data = False
     assigned = open(fileAssigned, "r")
     expect = open(fileExpectAssigned, "r")
diff --git a/workflow/tests/test_dataQC.py b/workflow/tests/test_dataQC.py
index e77d468..55df66d 100644
--- a/workflow/tests/test_dataQC.py
+++ b/workflow/tests/test_dataQC.py
@@ -6,12 +6,16 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.dataQC
 def test_dataQC():
-    assert os.path.exists(os.path.join(test_output_path, 'Q-Y5F6_1M.se.sorted.deduped.tin.xls'))
-    assert countLines(os.path.join(test_output_path, 'Q-Y5F6_1M.se.sorted.deduped.tin.xls'))
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Q-Y5F6_1M.se.sorted.deduped.tin.xls'))
+    assert countLines(os.path.join(test_output_path,
+                                   'Q-Y5F6_1M.se.sorted.deduped.tin.xls'))
+
 
 def countLines(fileName):
     data = False
diff --git a/workflow/tests/test_dedupReads.py b/workflow/tests/test_dedupReads.py
index 49cf420..89fc2b1 100644
--- a/workflow/tests/test_dedupReads.py
+++ b/workflow/tests/test_dedupReads.py
@@ -6,16 +6,24 @@ import os
 import utils
 
 data_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-	'/../../'
+    '/../../'
 
 
 @pytest.mark.dedupData
 def test_dedupData():
-    assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.bam'))
-    assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.bam.bai'))
-    assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chr8.bam'))
-    assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chr8.bam.bai'))
-    assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chr4.bam'))
-    assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chr4.bam.bai'))
-    assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chrY.bam'))
-    assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chrY.bam.bai'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.bam'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.bam.bai'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chr8.bam'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chr8.bam.bai'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chr4.bam'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chr4.bam.bai'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chrY.bam'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.sorted.deduped.chrY.bam.bai'))
diff --git a/workflow/tests/test_downsampleData.py b/workflow/tests/test_downsampleData.py
index fd42c49..6d98ad6 100644
--- a/workflow/tests/test_downsampleData.py
+++ b/workflow/tests/test_downsampleData.py
@@ -6,8 +6,9 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.downsampleData
 def test_downsampleData():
-    assert os.path.exists(os.path.join(test_output_path, 'sampled.1.fq'))
\ No newline at end of file
+    assert os.path.exists(os.path.join(test_output_path, 'sampled.1.fq'))
diff --git a/workflow/tests/test_fastqc.py b/workflow/tests/test_fastqc.py
index 89303fe..07e7610 100644
--- a/workflow/tests/test_fastqc.py
+++ b/workflow/tests/test_fastqc.py
@@ -6,8 +6,10 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.fastqc
 def test_fastqc():
-    assert os.path.exists(os.path.join(test_output_path, 'Q-Y5F6_1M.R1_fastqc.zip'))
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Q-Y5F6_1M.R1_fastqc.zip'))
diff --git a/workflow/tests/test_getBag.py b/workflow/tests/test_getBag.py
index 1c63c9d..a99acc6 100644
--- a/workflow/tests/test_getBag.py
+++ b/workflow/tests/test_getBag.py
@@ -6,8 +6,10 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.getBag
 def test_getBag():
-    assert os.path.exists(os.path.join(test_output_path, 'Replicate_Q-Y5F6.zip'))
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Replicate_Q-Y5F6.zip'))
diff --git a/workflow/tests/test_getData.py b/workflow/tests/test_getData.py
index a14be93..95e2018 100644
--- a/workflow/tests/test_getData.py
+++ b/workflow/tests/test_getData.py
@@ -6,9 +6,12 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.getData
 def test_getData():
-    assert os.path.exists(os.path.join(test_output_path, 'Replicate_Q-Y5F6/bagit.txt'))
-    assert os.path.exists(os.path.join(test_output_path, 'Replicate_Q-Y5F6/data/assets/Study/Q-Y4GY/Experiment/Q-Y4DP/Replicate/Q-Y5F6/mMARIS_Six2-#3.gene.rpkm.txt'))
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Replicate_Q-Y5F6/bagit.txt'))
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Replicate_Q-Y5F6/data/assets/Study/Q-Y4GY/Experiment/Q-Y4DP/Replicate/Q-Y5F6/mMARIS_Six2-#3.gene.rpkm.txt'))
diff --git a/workflow/tests/test_inferMetadata.py b/workflow/tests/test_inferMetadata.py
index 518664c..7485163 100644
--- a/workflow/tests/test_inferMetadata.py
+++ b/workflow/tests/test_inferMetadata.py
@@ -6,9 +6,10 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.inferMetadata
 def test_inferMetadata():
-    assert os.path.exists(os.path.join(test_output_path, 'Q-Y5F6_1M.se.inferMetadata.log'))
-
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Q-Y5F6_1M.se.inferMetadata.log'))
diff --git a/workflow/tests/test_makeBigWig.py b/workflow/tests/test_makeBigWig.py
index 9292ac6..d8f62f5 100644
--- a/workflow/tests/test_makeBigWig.py
+++ b/workflow/tests/test_makeBigWig.py
@@ -6,7 +6,7 @@ import os
 import utils
 
 data_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-	'/../../'
+    '/../../'
 
 
 @pytest.mark.makeBigWig
diff --git a/workflow/tests/test_makeFeatureCounts.py b/workflow/tests/test_makeFeatureCounts.py
index d33527a..e67bca8 100644
--- a/workflow/tests/test_makeFeatureCounts.py
+++ b/workflow/tests/test_makeFeatureCounts.py
@@ -6,11 +6,14 @@ import os
 import utils
 
 data_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-	'/../../'
+    '/../../'
 
 
 @pytest.mark.makeFeatureCounts
 def test_makeFeatureCounts():
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.countData'))
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.countTable.csv'))
-	assert os.path.exists(os.path.join(data_output_path, 'Q-Y5F6_1M.se.tpmTable.csv'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.countData'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.countTable.csv'))
+    assert os.path.exists(os.path.join(
+        data_output_path, 'Q-Y5F6_1M.se.tpmTable.csv'))
diff --git a/workflow/tests/test_outputBag.py b/workflow/tests/test_outputBag.py
index 4132d83..c73e647 100644
--- a/workflow/tests/test_outputBag.py
+++ b/workflow/tests/test_outputBag.py
@@ -6,7 +6,8 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.outputBag
 def test_outputBag():
diff --git a/workflow/tests/test_parseMetadata.py b/workflow/tests/test_parseMetadata.py
index 59677bb..fa48880 100644
--- a/workflow/tests/test_parseMetadata.py
+++ b/workflow/tests/test_parseMetadata.py
@@ -6,13 +6,15 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.parseMetadata
 def test_parseMetadata():
     assert os.path.exists(os.path.join(test_output_path, 'design.csv'))
     assert readLine(os.path.join(test_output_path, 'design.csv'))
 
+
 def readLine(fileName):
     data = False
     file = open(fileName, "r")
diff --git a/workflow/tests/test_trimData.py b/workflow/tests/test_trimData.py
index ba0eeda..40dd2dc 100644
--- a/workflow/tests/test_trimData.py
+++ b/workflow/tests/test_trimData.py
@@ -6,14 +6,18 @@ from io import StringIO
 import os
 
 test_output_path = os.path.dirname(os.path.abspath(__file__)) + \
-                '/../../'
+    '/../../'
+
 
 @pytest.mark.trimData
 def test_trimData_se():
-    assert os.path.exists(os.path.join(test_output_path, 'Q-Y5F6_1M.se_trimmed.fq.gz'))
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Q-Y5F6_1M.se_trimmed.fq.gz'))
 
 
 @pytest.mark.trimData
 def test_trimData_pe():
-    assert os.path.exists(os.path.join(test_output_path, 'Q-Y5F6_1M.pe_R1_val_1.fq.gz'))
-    assert os.path.exists(os.path.join(test_output_path, 'Q-Y5F6_1M.pe_R2_val_2.fq.gz'))
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Q-Y5F6_1M.pe_R1_val_1.fq.gz'))
+    assert os.path.exists(os.path.join(
+        test_output_path, 'Q-Y5F6_1M.pe_R2_val_2.fq.gz'))
-- 
GitLab