diff --git a/workflow/configs/biohpc.config b/workflow/configs/biohpc.config
index 94565f9c577a3fb6ab583b3f3a140702d011ee5f..9ff1e4bf38d24e68075bd6dbfd7719d75ca63741 100755
--- a/workflow/configs/biohpc.config
+++ b/workflow/configs/biohpc.config
@@ -1,10 +1,11 @@
 singularity {
   enabled = true
-  runOptions = '\
-  --bind /cm/shared/apps/slurm/16.05.8,/etc/slurm,/cm/shared/apps/slurm/var/etc/,/usr/lib64/libreadline.so.6 \
-  --bind /usr/lib64/libhistory.so.6,/usr/lib64/libtinfo.so.5,/var/run/munge,/usr/lib64/libmunge.so.2 \
-  --bind /usr/lib64/libmunge.so.2.0.0,/cm/shared/apps/slurm/16.05.8/lib64/slurm/ \
-  --bind /cm/shared/apps/java/oracle/jdk1.8.0_231'
+  runOptions = ''
+  // The commented-out runOptions below connect the experimental atac container to BioHPC's Slurm job scheduler.
+  // runOptions = '\
+  // --bind /cm/shared/apps/slurm/16.05.8,/etc/slurm,/cm/shared/apps/slurm/var/etc/,/usr/lib64/libreadline.so.6 \
+  // --bind /usr/lib64/libhistory.so.6,/usr/lib64/libtinfo.so.5,/var/run/munge,/usr/lib64/libmunge.so.2 \
+  // --bind /usr/lib64/libmunge.so.2.0.0,/cm/shared/apps/slurm/16.05.8/lib64/slurm/'
 
                                              // Please do NOT use "--disable-cache" in this runOptions. 
                                               // Starting from version 2.0.0, the astrocyte_cli will clean up the cache automatically.
@@ -21,7 +22,8 @@ process {
     beforeScript = 'ulimit -Ss unlimited'
 
     withName:runSource {
-        container = 'docker://git.biohpc.swmed.edu:5050/s219741/astrocyte-atac-source/atac:0.0.1'
+        // Disabled: experimental containerized version of the caper software.
+        // container = 'docker://git.biohpc.swmed.edu:5050/s219741/astrocyte-atac-source/atac:0.0.1'
         executor = 'local'
     }
 }
diff --git a/workflow/main.nf b/workflow/main.nf
index 30b3662815c0c15bf3f65cac190ab14e4dc30a45..7ebcf8035ecfd95091552ab9928c03fae85d27dc 100644
--- a/workflow/main.nf
+++ b/workflow/main.nf
@@ -28,17 +28,20 @@ process runSource {
     
     shell:
     '''
-    # Allow for the container to use the libraries & paths of Slurm on BioHPC.
-    export LD_LIBRARY_PATH=/atac/jdk-12/lib:/usr/lib64:/lib:$LD_LIBRARY_PATH
-    export PATH=/atac/jdk-12:/atac/jdk-12/bin:/bin:/cm/shared/apps/slurm/16.05.8/bin:$PATH
+    # Enable the use of bash-specific conda commands in this shell.
+    eval "$(conda shell.bash hook)"
 
-    # Provide the container the SlurmUser (user and group) info used on Nucleus.
-    echo "slurm:x:450:450::/cm/local/apps/slurm:/bin/bash" >> /etc/passwd
-    echo "slurm:x:450:" >> /etc/group
+    module load python/3.8.x-anaconda
+    module load openjdk/18
 
-    # Source the container's entrypoint script to have access to the caper 
-    # commands to run the ATAC-seq pipeline in the runner.
-    source /atac/entrypoint.sh
+    # Create a temporary conda environment for caper.
+    conda create -y -c bioconda -c defaults -c conda-forge --name astrocyte-atac-caper python=3.8.18
+    # Activate the environment and install caper into it via pip.
+    conda activate astrocyte-atac-caper
+    export PATH=$PATH:~/.local/bin
+    pip install caper
+    # caper init creates a config directory in the user's home (home2 on BioHPC): ~/.caper/
+    caper init slurm
 
     # Record the relevant software versions.
     java -version 2> java_version.txt
@@ -46,15 +49,20 @@ process runSource {
     caper --version > caper_version.txt
 
     # Launch the ATAC-seq leader job.
-    submit=$(caper hpc submit !{baseDir}/external_repo/astrocyte-atac-runner/atac.wdl -i !{inputJson} --singularity --leader-job-name atac-source)
+    jobsubmit=$(caper hpc submit !{baseDir}/external_repo/astrocyte-atac-runner/atac.wdl -i !{inputJson} --singularity --leader-job-name atac-source)
 
     # Monitor the state of the leader job; if it enters the COMPLETED, FAILED, or CANCELLED state, then finish the workflow process.
-    state=$(bash !{baseDir}/scripts/checkJobState.sh "${submit}")
+    state=$(bash !{baseDir}/scripts/checkJobState.sh "${jobsubmit}")
     echo "Lead Job state check $(date) - State: $state" >> lead_job_check.txt
     while [[ "$state" != *"COMPLETED"* ]] && [[ "$state" != *"FAILED"* ]] && [[ "$state" != *"CANCELLED"* ]]; do
         sleep 15
-        state=$(bash !{baseDir}/scripts/checkJobState.sh "${submit}")
+        state=$(bash !{baseDir}/scripts/checkJobState.sh "${jobsubmit}")
         echo "Lead Job state check $(date) - State: $state" >> lead_job_check.txt
     done
+    
+    # Deactivate the temporary caper conda environment and delete it.
+    conda deactivate
+    conda remove --name astrocyte-atac-caper --all
+    rm -rf ~/.caper/
     '''
 }