diff --git a/workflow/scripts/call_peaks_macs.py b/workflow/scripts/call_peaks_macs.py
index 38e03cc89d6c91d1d9f8247aa7396e2f9262cde6..cc003c9e892fe49b90c1fad285bd378bffbd9364 100644
--- a/workflow/scripts/call_peaks_macs.py
+++ b/workflow/scripts/call_peaks_macs.py
@@ -71,7 +71,7 @@ def check_tools():
     logger.info('Checking for required libraries and components on this system')
 
     macs_path = shutil.which("macs2")
-    if r_path:
+    if macs_path:
         logger.info('Found MACS2: %s', macs_path)
 
         # Get Version
@@ -135,7 +135,6 @@ def call_peaks_macs(experiment, xcor, control, prefix, genome_size, chrom_sizes)
         fragment_length = frag_lengths.split(',')[0]  # grab first value
         logger.info("Fraglen %s", fragment_length)
 
-
     # Generate narrow peaks and preliminary signal tracks
 
     command = 'macs2 callpeak ' + \
@@ -155,7 +154,6 @@ def call_peaks_macs(experiment, xcor, control, prefix, genome_size, chrom_sizes)
     narrowpeak_fn = '%s.narrowPeak' % (prefix)
     clipped_narrowpeak_fn = 'clipped-%s' % (narrowpeak_fn)
 
-
     steps = ['slopBed -i %s -g %s -b 0' % (int_narrowpeak_fn, chrom_sizes),
              'bedClip stdin %s %s' % (chrom_sizes, clipped_narrowpeak_fn)]
 
diff --git a/workflow/scripts/experiment_qc.py b/workflow/scripts/experiment_qc.py
index cf22338664a16157da2ad3b1c107a2af6d2372de..0418d0e34bee026e2756eec12462bfc85ba73dce 100644
--- a/workflow/scripts/experiment_qc.py
+++ b/workflow/scripts/experiment_qc.py
@@ -198,7 +198,6 @@ def main():
     check_spearman_correlation(mbs_filename)
     check_pearson_correlation(mbs_filename)
 
-
     # Run coverage
     check_coverage(design_df, extension)
 
diff --git a/workflow/scripts/map_qc.py b/workflow/scripts/map_qc.py
index ab63c423f613cef1fcdc2eaf91828b9b6d95b128..ecb6170013b1491c9eac3abf4dbd9a8c7d33c213 100644
--- a/workflow/scripts/map_qc.py
+++ b/workflow/scripts/map_qc.py
@@ -197,7 +197,6 @@ def dedup_mapped(bam, bam_basename, paired):
             shlex.split(sambamba_markdup_command),
             stderr=temp_file)
 
-
     # Remove duplicates
     final_bam_prefix = bam_basename + ".dedup"
     final_bam_filename = final_bam_prefix + ".bam"
diff --git a/workflow/scripts/motif_search.py b/workflow/scripts/motif_search.py
index 0c2e3f8c4efdefccaa016dfb4e3b75569ce0f5fa..fd82853ad00a151e9217eeb9416c8923b05538ab 100644
--- a/workflow/scripts/motif_search.py
+++ b/workflow/scripts/motif_search.py
@@ -56,6 +56,7 @@ def get_args():
 
 # Functions
 
+
 def check_tools():
     '''Checks for required components on user system'''
 
@@ -114,7 +115,7 @@ def motif_search(filename, genome, experiment, peak):
     else:
         peak_no = peak
 
-    sorted_fn = '%s.%s.narrowPeak' % (file_basename, peak)
+    sorted_fn = '%s.%s.narrowPeak' % (file_basename, peak_no)
 
     out, err = utils.run_pipe([
         'sort -k %dgr,%dgr %s' % (5, 5, filename),
@@ -127,8 +128,7 @@ def motif_search(filename, genome, experiment, peak):
     if err:
         logger.error("bedtools error: %s", err)
 
-
-    #Call memechip
+    # Call MEME-ChIP
     out, err = utils.run_pipe([
         'meme-chip -oc %s -meme-minw 5 -meme-maxw 15 -meme-nmotifs 10 %s -norand' % (out_motif, out_fa)])
     if err:
diff --git a/workflow/scripts/overlap_peaks.py b/workflow/scripts/overlap_peaks.py
index 71438f779ab96695ead05a5653221ae99f5b3f6a..cd71c5a176e8d1c4b2067435ebf5b42779f2df8b 100644
--- a/workflow/scripts/overlap_peaks.py
+++ b/workflow/scripts/overlap_peaks.py
@@ -6,6 +6,7 @@ import os
 import argparse
 import logging
 import shutil
+import subprocess
 import pandas as pd
 import utils
 
diff --git a/workflow/scripts/pool_and_psuedoreplicate.py b/workflow/scripts/pool_and_psuedoreplicate.py
index f3d702f64b0cf1b9283b8ab4e26e9fbe7df57922..e050d7e034af1717c5ca3ad9b5fc198987f7bc2e 100644
--- a/workflow/scripts/pool_and_psuedoreplicate.py
+++ b/workflow/scripts/pool_and_psuedoreplicate.py
@@ -153,7 +153,6 @@ def self_psuedoreplication(tag_file, prefix, paired):
     logger.info("Running psuedo with %s", psuedo_command)
     subprocess.check_call(shlex.split(psuedo_command))
 
-
     # Convert read pairs to reads into standard tagAlign file
 
     for i, index in enumerate([0, 1]):
@@ -344,7 +343,6 @@ def main():
         tmp_metadata['tag_align'] = path_to_file
         design_new_df = design_new_df.append(tmp_metadata)
 
-
     # Write out new dataframe
     design_new_df.to_csv(experiment_id + '_ppr.tsv',
                          header=True, sep='\t', index=False)
diff --git a/workflow/scripts/software_report.py b/workflow/scripts/software_report.py
index 406b9bde9189f71093e54ade0268ce2dd40ace8e..2e605e3bb952fe3ffd891fa04800edb7f01650a0 100644
--- a/workflow/scripts/software_report.py
+++ b/workflow/scripts/software_report.py
@@ -1,11 +1,13 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 
+'''Make YAML of software versions.'''
+
 from __future__ import print_function
 from collections import OrderedDict
 import re
 
-regexes = {
+software_regex = {
     'Trim Galore!': ['version_trimgalore.txt', r"version (\S+)"],
     'Cutadapt': ['version_cutadapt.txt', r"Version (\S+)"],
     'BWA': ['version_bwa.txt', r"Version: (\S+)"],
@@ -42,7 +44,7 @@ results['DiffBind'] = '<span style="color:#999999;\">N/A</span>'
 results['deepTools'] = '<span style="color:#999999;\">N/A</span>'
 
 # Search each file using its regex
-for k, v in regexes.items():
+for k, v in software_regex.items():
     with open(v[0]) as x:
         versions = x.read()
         match = re.search(v[1], versions)
@@ -61,6 +63,6 @@ print(
         <dl class="dl-horizontal">
     '''
 )
-for k,v in results.items():
-    print("        <dt>{}</dt><dd>{}</dd>".format(k,v))
+for k, v in results.items():
+    print("        <dt>{}</dt><dd>{}</dd>".format(k, v))
 print("        </dl>")
diff --git a/workflow/scripts/xcor.py b/workflow/scripts/xcor.py
index 3b1328cc80759d8e653d9c7d333102302113d43e..14e4ed30143755b4deece75f8d2cb8f43bc4703e 100644
--- a/workflow/scripts/xcor.py
+++ b/workflow/scripts/xcor.py
@@ -96,13 +96,11 @@ def xcor(tag, paired):
     tag_basename = os.path.basename(utils.strip_extensions(tag, STRIP_EXTENSIONS))
     uncompressed_tag_filename = tag_basename
 
-
     # Subsample tagAlign file
     number_reads = 15000000
     subsampled_tag_filename = \
         tag_basename + ".%d.tagAlign.gz" % (number_reads/1000000)
 
-
     steps = [
         'zcat %s' % (tag),
         'grep -v "chrM"',
@@ -142,7 +140,6 @@ def xcor(tag, paired):
     return cc_scores_filename
 
 
-
 def main():
     args = get_args()
     paired = args.paired