Skip to content

Commit

Permalink
Update dependency of spark-avro that was donated to the ASF from version 2.4.0 of Spark by Databricks
Browse files Browse the repository at this point in the history
  • Loading branch information
sungjuly committed Jan 31, 2019
1 parent b15f13a commit e9cacb1
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 3 deletions.
4 changes: 2 additions & 2 deletions project/SparkRedshiftBuild.scala
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ object SparkRedshiftBuild extends Build {
"com.eclipsesource.minimal-json" % "minimal-json" % "0.9.4",
// We require spark-avro, but avro-mapred must be provided to match Hadoop version.
// In most cases, avro-mapred will be provided as part of the Spark assembly JAR.
"com.databricks" %% "spark-avro" % "3.0.0",
"org.apache.spark" %% "spark-avro" % sparkVersion.value,
if (testHadoopVersion.value.startsWith("1")) {
"org.apache.avro" % "avro-mapred" % "1.7.7" % "provided" classifier "hadoop1" exclude("org.mortbay.jetty", "servlet-api")
} else {
Expand Down Expand Up @@ -118,7 +118,7 @@ object SparkRedshiftBuild extends Build {
"org.apache.spark" %% "spark-core" % testSparkVersion.value % "test" exclude("org.apache.hadoop", "hadoop-client") force(),
"org.apache.spark" %% "spark-sql" % testSparkVersion.value % "test" exclude("org.apache.hadoop", "hadoop-client") force(),
"org.apache.spark" %% "spark-hive" % testSparkVersion.value % "test" exclude("org.apache.hadoop", "hadoop-client") force(),
"com.databricks" %% "spark-avro" % testSparkAvroVersion.value % "test" exclude("org.apache.avro", "avro-mapred") force()
"org.apache.spark" %% "spark-avro" % testSparkVersion.value % "test" exclude("org.apache.avro", "avro-mapred") force()
),
// Although spark-avro declares its avro-mapred dependency as `provided`, its version of the
// dependency can still end up on the classpath during tests, which breaks the tests for
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ private[redshift] class RedshiftWriter(
val writer = sqlContext.createDataFrame(convertedRows, convertedSchema).write
(tempFormat match {
case "AVRO" =>
writer.format("com.databricks.spark.avro")
writer.format("avro")
case "CSV" =>
writer.format("csv")
.option("escape", "\"")
Expand Down

0 comments on commit e9cacb1

Please sign in to comment.