1. Save a single file on HDFS (rather than a directory)

    ```scala
    package cn.spark

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.SparkContext
    import org.apache.spark.SparkContext._
    import org.apache.hadoop.conf.Configuration
    import org.apache.hadoop.fs._
    import java.net.URI

    import org.elasticsearch.spark._
    import org.elasticsearch.spark.sql._
    import org.apache.spark.sql.functions.{from_unixtime, date_format, current_timestamp, col, length, lit}
    import java.time.{ZonedDateTime, ZoneId, LocalDateTime}
    import java.time.format.DateTimeFormatter
    import java.io._

    import org.apache.log4j.Logger

    object json2es {
      @transient lazy val logger = Logger.getLogger(getClass.getName)

      def main(args: Array[String]) {
        val spark = SparkSession.builder()
          .appName("Spark copy json samples to elasticsearch")
          .enableHiveSupport()
          .getOrCreate()
        val sc = SparkContext.getOrCreate()
        val hdfsconf = new Configuration()
        val fs = FileSystem.get(hdfsconf)
        val pushflagfile = "hdfs:///user/hive/warehouse/svReview/push.flag"

        // fs.create() yields a single file on HDFS (unlike RDD.saveAsTextFile,
        // which produces a directory of part files); write the flag value into it.
        val output = fs.create(new Path(pushflagfile))
        val os = new BufferedOutputStream(output)
        os.write("false".getBytes("UTF-8"))
        os.close()
      }
    }
    ```
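
    For completeness, a minimal sketch of reading the flag back with the same Hadoop FileSystem API; since `fs.create` produced a single file, `fs.open` can read it directly. The `ReadPushFlag` object name is an assumption for illustration, and the path is taken from the snippet above:

    ```scala
    import org.apache.hadoop.conf.Configuration
    import org.apache.hadoop.fs.{FileSystem, Path}
    import java.io.{BufferedReader, InputStreamReader}

    // Hypothetical companion to json2es: read the single-file flag back.
    object ReadPushFlag {
      def main(args: Array[String]): Unit = {
        val fs = FileSystem.get(new Configuration())
        val flagPath = new Path("hdfs:///user/hive/warehouse/svReview/push.flag")

        // Open the flag file and read its one-line contents.
        val reader = new BufferedReader(new InputStreamReader(fs.open(flagPath), "UTF-8"))
        try {
          val flag = reader.readLine() // expected to be "false" (or "true")
          println(s"push.flag = $flag")
        } finally {
          reader.close()
        }
      }
    }
    ```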