diff --git a/project/SparkRedshiftBuild.scala b/project/SparkRedshiftBuild.scala
index 1a5301f9..8d70c855 100644
--- a/project/SparkRedshiftBuild.scala
+++ b/project/SparkRedshiftBuild.scala
@@ -47,7 +47,7 @@ object SparkRedshiftBuild extends Build {
     organization := "com.databricks",
     scalaVersion := "2.11.7",
     crossScalaVersions := Seq("2.10.5", "2.11.7"),
-    sparkVersion := "2.0.0",
+    sparkVersion := "2.4.0",
     testSparkVersion := sys.props.get("spark.testVersion").getOrElse(sparkVersion.value),
     testSparkAvroVersion := sys.props.get("sparkAvro.testVersion").getOrElse("3.0.0"),
     testHadoopVersion := sys.props.get("hadoop.testVersion").getOrElse("2.2.0"),
diff --git a/src/main/scala/com/databricks/spark/redshift/Parameters.scala b/src/main/scala/com/databricks/spark/redshift/Parameters.scala
index 875f5b75..72b42929 100644
--- a/src/main/scala/com/databricks/spark/redshift/Parameters.scala
+++ b/src/main/scala/com/databricks/spark/redshift/Parameters.scala
@@ -69,7 +69,8 @@ private[redshift] object Parameters {
         "You cannot specify both the 'dbtable' and 'query' parameters at the same time.")
     }
     val credsInURL = userParameters.get("url")
-      .filter(url => url.contains("user=") || url.contains("password="))
+      .filter(url => url.contains("user=") || url.contains("password=")
+        || url.contains(":iam://"))
     if (userParameters.contains("user") || userParameters.contains("password")) {
       if (credsInURL.isDefined) {
         throw new IllegalArgumentException(
diff --git a/src/main/scala/com/databricks/spark/redshift/RedshiftFileFormat.scala b/src/main/scala/com/databricks/spark/redshift/RedshiftFileFormat.scala
index 30f56b60..27a12dd0 100644
--- a/src/main/scala/com/databricks/spark/redshift/RedshiftFileFormat.scala
+++ b/src/main/scala/com/databricks/spark/redshift/RedshiftFileFormat.scala
@@ -36,7 +36,7 @@ import org.apache.spark.sql.types.StructType
  * This is not intended for public consumption / use outside of this package and therefore
  * no API stability is guaranteed.
  */
-private[redshift] class RedshiftFileFormat extends FileFormat {
+class RedshiftFileFormat extends FileFormat {
   override def inferSchema(
       sparkSession: SparkSession,
       options: Map[String, String],
diff --git a/src/main/scala/com/databricks/spark/redshift/RedshiftWriter.scala b/src/main/scala/com/databricks/spark/redshift/RedshiftWriter.scala
index 8383231d..574c8a15 100644
--- a/src/main/scala/com/databricks/spark/redshift/RedshiftWriter.scala
+++ b/src/main/scala/com/databricks/spark/redshift/RedshiftWriter.scala
@@ -282,7 +282,7 @@ private[redshift] class RedshiftWriter(
     val writer = sqlContext.createDataFrame(convertedRows, convertedSchema).write
     (tempFormat match {
       case "AVRO" =>
-        writer.format("avro")
+        writer.format("avro")
       case "CSV" =>
         writer.format("csv")
           .option("escape", "\"")
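
For context on the Parameters.scala hunk: the widened filter now treats a JDBC URL as carrying credentials when it embeds user=/password= pairs or uses the Redshift IAM scheme (":iam://"), so supplying separate 'user'/'password' parameters alongside such a URL is rejected. Below is a minimal, self-contained sketch of that behavior; the object name and sample parameter values are hypothetical and not part of the patch.

object CredsInUrlCheck {
  // Same predicate as the patched Parameters.scala: a URL "contains
  // credentials" if it embeds user=/password= or uses the IAM scheme.
  def credsInURL(userParameters: Map[String, String]): Option[String] =
    userParameters.get("url")
      .filter(url => url.contains("user=") || url.contains("password=")
        || url.contains(":iam://"))

  def main(args: Array[String]): Unit = {
    // Hypothetical parameter map: an IAM-style Redshift JDBC URL plus a
    // separate 'user' parameter, which the widened check now flags.
    val params = Map(
      "url" -> "jdbc:redshift:iam://examplecluster:5439/dev",
      "user" -> "alice")
    if (params.contains("user") || params.contains("password")) {
      credsInURL(params).foreach { _ =>
        // Mirrors where Parameters.scala throws IllegalArgumentException.
        println("Conflict: credentials supplied both in the URL and as parameters")
      }
    }
  }
}

On the RedshiftWriter.scala hunk: Spark 2.4 ships the Avro data source built in, so the short name "avro" resolves without the external com.databricks.spark.avro package; the sparkVersion bump in SparkRedshiftBuild.scala and the format rename therefore go together.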