diff --git a/src/main/scala/com/databricks/spark/redshift/Parameters.scala b/src/main/scala/com/databricks/spark/redshift/Parameters.scala
index 875f5b75..ec8dc6ea 100644
--- a/src/main/scala/com/databricks/spark/redshift/Parameters.scala
+++ b/src/main/scala/com/databricks/spark/redshift/Parameters.scala
@@ -18,6 +18,8 @@ package com.databricks.spark.redshift
 
 import com.amazonaws.auth.{AWSCredentialsProvider, BasicSessionCredentials}
 
+import scala.util.Try
+
 /**
  * All user-specifiable parameters for spark-redshift, along with their validation rules and
  * defaults.
@@ -285,5 +287,11 @@ private[redshift] object Parameters {
         new BasicSessionCredentials(accessKey, secretAccessKey, sessionToken))
       }
     }
+
+    /**
+     * Timeout (in milliseconds) between writing temp files in S3 and calling
+     * upon Redshift to COPY.
+     */
+    def writeTimeout: Try[Int] = Try(parameters.get("writetimeout").get.toInt)
   }
 }
diff --git a/src/main/scala/com/databricks/spark/redshift/RedshiftWriter.scala b/src/main/scala/com/databricks/spark/redshift/RedshiftWriter.scala
index 8383231d..361ecb9e 100644
--- a/src/main/scala/com/databricks/spark/redshift/RedshiftWriter.scala
+++ b/src/main/scala/com/databricks/spark/redshift/RedshiftWriter.scala
@@ -395,6 +395,11 @@ private[redshift] class RedshiftWriter(
       tempDir = params.createPerQueryTempDir(),
       tempFormat = params.tempFormat,
       nullString = params.nullString)
+
+    if (params.writeTimeout.isSuccess) {
+      Thread.sleep(params.writeTimeout.get)
+    }
+
     val conn = jdbcWrapper.getConnector(params.jdbcDriver, params.jdbcUrl, params.credentials)
     conn.setAutoCommit(false)
     try {
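
A side note on the guard added in RedshiftWriter.scala: `writeTimeout` is a `def`, so the `isSuccess`/`get` pair evaluates the `Try` twice. An equivalent single-evaluation form, assuming `writeTimeout` keeps the `Try[Int]` signature introduced above, would be:

    // Sketch only, not part of the patch: sleeps when "writetimeout" is
    // present and parses as an Int, evaluating the Try exactly once.
    // Behavior matches the if/isSuccess/get guard above.
    params.writeTimeout.foreach(millis => Thread.sleep(millis.toLong))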
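
For context, a sketch of how the new option would be supplied through the data source API. The `writetimeout` key matches the lookup added in Parameters.scala; `df` is assumed to be an existing DataFrame, and the URL, table, and tempdir values are placeholders, not values from this patch:

    // Hypothetical write using the new option; connection details are placeholders.
    df.write
      .format("com.databricks.spark.redshift")
      .option("url", "jdbc:redshift://examplecluster:5439/dev?user=u&password=p")
      .option("dbtable", "my_table")
      .option("tempdir", "s3n://example-bucket/tmp")
      .option("writetimeout", "5000") // wait 5000 ms between the S3 write and COPY
      .mode("error")
      .save()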