Yes, you can, but the external table's location has to be on HDFS. Refer to the code below:
import org.apache.spark.sql.SaveMode
// sc is an existing SparkContext (e.g. the one provided by spark-shell)
val hiveContext = new org.apache.spark.sql.hive.HiveContext(sc)
// Read the Avro file into a DataFrame (needs the com.databricks:spark-avro package on the classpath)
val results = hiveContext.read.format("com.databricks.spark.avro").load("people.avro")
// Build the Hive column list ("name STRING, age BIGINT, ...") from the DataFrame schema.
// Only a few Spark types are mapped here; extend the match for any other types in your data.
val schema = results.schema.map(x => x.name + " " + (x.dataType.toString match {
  case "StringType"  => "STRING"
  case "IntegerType" => "INT"
  case "LongType"    => "BIGINT"
  case "DoubleType"  => "DOUBLE"
})).mkString(",")
// Two external tables that both point at the same HDFS location
val hive_sql = "CREATE EXTERNAL TABLE people_and_age (" + schema + ") ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LOCATION '/user/ravi/people_age'"
val hive_sql1 = "CREATE EXTERNAL TABLE people_and_age1 (" + schema + ") ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LOCATION '/user/ravi/people_age'"
hiveContext.sql(hive_sql)
hiveContext.sql(hive_sql1)
// Save the DataFrame as a Hive table as well (DataFrame.saveAsTable(name, mode) is the older, deprecated form)
results.write.mode(SaveMode.Overwrite).saveAsTable("people_age")
hiveContext.sql("select * from people_age").show()
hiveContext.sql("select * from people_age1").show()