/*
   Copyright 2010 Aaron J. Radke

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/
package cc.drx

/**Generic sound source*/
trait Sound{
  def play(implicit render:Sound.Render):Sound.SoundOutput
  // def loop(implicit render:Sound.Render):Unit = ??? //compose several play's
  def save(file:File)(implicit render:Sound.Render):Unit
}
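
/* Example (a sketch, not part of the original API surface): with the implicit RenderJVM defined
 * below in scope, any Sound can be played or written to a wav file; the file path is hypothetical:
 *   val tone:Sound = Sound.sin(440.hz, 1.s)
 *   val out = tone.play            //returns a SoundOutput controller (stop/close/cursor)
 *   tone.save(File("tone.wav"))    //File("tone.wav") is only for illustration
 */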
object Sound{
  //TODO use a generic convert tool that combines ImageMagick, pandoc, and ffmpeg and just does the right thing based on the type of file

  /**use ffmpeg (if on the PATH) to encode an mp3 file*/
  def toMP3(src:File)(implicit ec:ExecutionContext):Future[File] = {
    val dst = src.companion("mp3")
    val cmd = Shell( s"ffmpeg -y -i ${src.path} -codec:a libmp3lame -qscale:a 2 ${dst.path}")
    cmd.lines.map{_ => dst}
  }
  /**use ffmpeg (if on the PATH) to encode a wav file*/
  def toWAV(src:File)(implicit ec:ExecutionContext):Future[File] = {
    val dst = src.companion("wav")
    //val sampleRate = 5.k.Hz
    //use the additional flag to downsample to a sample rate:  -ar ${sampleRate.hz.toFloat} 
    val cmd = Shell( s"ffmpeg -y -i ${src.path} -codec:a pcm_s16le ${dst.path}")
    cmd.lines.map{_ => dst}
  }
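  /* Example (sketch): both converters shell out to ffmpeg and return a Future of the destination
   * file, so an ExecutionContext is required; the input file name below is hypothetical:
   *   import Implicit.ec
   *   Sound.toMP3(File("clip.wav")).foreach(mp3 => println(s"wrote ${mp3.path}"))
   */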
  /**The Java sound API is overly complex for the simple cases*/
  /*
  case object Empty extends SoundSample{
    def play(implicit render:Render):Unit =  ???  //TODO do the right thing here
    def save(file:File)(implicit render:Render):Unit = ??? //TODO do the right thing here
  }
  */
  //--file based
  def apply(file:File)(implicit render:Sound.Render):SoundFile = SoundFile(file)(render)
  private val formats = "wav aiff".split(" ")

  /*
  //--sample based auto ranging
  def apply[A](samples:Iterable[A], sampleRate:Frequency)(implicit b:Bound.Boundable[A],boundOf:Bound.BoundOf[A]):SoundSample[A] = {
    val sampleDomain = Bound.find(samples) getOrElse Bound.of[A]//Bound(0d,1d)
    SoundSample(samples, sampleDomain, sampleRate, channels=1)
  }
  */

  //--sample based specified range
  /*
  def apply[A:Bound.Boundable](samples:Iterable[A], sampleRate:Frequency, sampleDomain:Bound[A]):SoundSample[A] = {
      val length:Time = sampleRate.inv*samples.size
      SoundSample(samples, sampleDomain, sampleRate, channels=1, length)
  }
  */
  /**mono Double samples with an explicitly provided sample domain (typically -1 to 1) */
  def apply(samples:Array[Double], sampleRate:Frequency, sampleDomain:Bound[Double]):SoundSample[Double] = {
    // val sampleDomain = Bound(-1d, 1d) //TODO should the stats be done here?
    val length = sampleRate.inv*samples.size
    SoundSample(samples, sampleDomain, sampleRate, channels=1, length)
  }
  def apply(samples:Array[Short], sampleRate:Frequency):SoundSample[Short] = {
    val sampleDomain = Bound.of[Short]
    val length = sampleRate.inv*samples.size
    SoundSample(samples, sampleDomain, sampleRate, channels=1, length)
  }

  /**convenience constructor for sinusoid samples*/
  def sin(freq:Frequency, length:Time=1.s, sampleRate:Frequency=8.k.hz):SoundSample[Double] =  {
    val nSamples:Int = (sampleRate*length).toInt
    val w:Double = freq.hz*tau
    def t(i:Int):Double = i*length.s/nSamples
    val samples:Array[Double] = Array.tabulate(nSamples){i => math.sin(w*t(i))}
    val sampleDomain = Bound(-1d,1d)
    SoundSample(samples, sampleDomain, sampleRate, channels=1, length)
  }
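
  /* Example (sketch): a 440 Hz test tone for one second at the default 8 kHz sample rate,
   * assuming the implicit RenderJVM defined below; the output path is hypothetical:
   *   Sound.sin(440.hz).play                        //play through the default source data line
   *   Sound.sin(880.hz, 2.s).save(File("a5.wav"))   //two second tone written to a wav file
   */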

  case class SoundFile(file:File)(implicit render:Sound.Render){
    private var _length:Option[Time] = None
    def length:Time = _length getOrElse {
      val len = render.length(file)
      _length = Some(len)
      len
    }
    def load[A](time:Bound[Time], pcmRange:Bound[A]):SoundSample[A] = render.load(file, time, pcmRange)
    def load:SoundSample[Double] = render.load(file)
    // def toSoundSample[A](range:Bound[A]):Try[SoundSample[A]] = render.load(file, range)
    def play:SoundOutput = load(Bound(0.s,60.s), Bound.of[Short]).play(render)
    def save(file:File):Unit = ??? //TODO do the right thing here

    def sampleIt[A](pcmRange:Bound[A]):Iterator[A] = render.sampleIt(file, pcmRange)
  }
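
  /* Example (sketch): file backed access, lazily querying length and samples through the Render;
   * the path is hypothetical:
   *   val sf = Sound(File("clip.wav"))
   *   sf.length        //duration read from the audio stream header
   *   val s = sf.load  //whole file as Double samples in the Short pcm range
   *   sf.play          //plays at most the first 60 seconds
   */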

  /**samples are assumed to be prescaled linear pressure mapped to PCM_SIGNED 16bit (Short) sound samples*/
  case class SoundSample[A](samples:Iterable[A], sampleDomain:Bound[A], sampleRate:Frequency, channels:Int, length:Time) extends Sound{

    //--generic implementations
    override def toString =
      s"SoundSample($sampleDomain, ${sampleRate.nice}, ${(channels == 1).getOrElse("mono",channels.toString)} )"
    //--PCM 16bit sound assumptions??
    // val sampleRange = Bound.of[Short] //16bit signed integer
    def play(implicit render:Render):SoundOutput = render.play(this)
    def save(file:File)(implicit render:Render):Unit = render.save(this, file)

    //--known length sample TODO can these be optional for stream based samples??
    // lazy val sampleSize = samples.size //FIXME make these optional
    // lazy val length:Time = sampleRate.inv*sampleSize //FIXME make these optional

    def slice(timeBound:Bound[Time]):SoundSample[A] = {
      val a = (timeBound.min*sampleRate).floor.toInt
      val b = (timeBound.max*sampleRate).ceil.toInt
      copy( samples = samples.drop(a).take(b-a), length = sampleRate.inv*(b-a) ) //also shrink the reported length to the sliced span
    }

    def ++(that:SoundSample[A]):SoundSample[A] = {
      require(this.sampleRate == that.sampleRate, "Joining sound samples requires same sample rate")
      require(this.channels == that.channels, "Joining sound samples requires same channels")
      SoundSample(
        this.samples      ++ that.samples,
        this.sampleDomain ++ that.sampleDomain,
        sampleRate,
        channels,
        this.length + that.length
      )
    }
  }
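
  /* Example (sketch): SoundSample values compose; slice takes a time window and ++ concatenates
   * samples that share a sample rate and channel count:
   *   val tone = Sound.sin(440.hz, 1.s) ++ Sound.sin(880.hz, 1.s)  //two seconds total
   *   val head = tone.slice(Bound(0.s, 1.s))                       //just the 440 Hz part
   */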

  trait SoundOutput{
    def sampleRate:Frequency
    def cursor:Time

    // def pause:Unit
    /**stop playing and flush any pending samples*/
    def stop:Unit
    /**stop and then close the underlying line*/
    def close:Unit
    //block until playing is done
    // def drain:Unit  = line.drain
    def play(s:SoundSample[_]):SoundOutput

    def isActive:Boolean

    //TODO use a callback when done
    // def onStop
  }

  trait Render{
    //--required
    def play(s:Sound):SoundOutput
    def save(s:Sound,file:File):Unit
    //def stop //TODO add an ability to stop
    // def load[A](file:File, range:Bound[A]):Try[SoundSample[A]] = Try(load(file, 0.s ~ 60.s, range))
    //-- generic time sliced sampling from a file
    def load[A](file:File, time:Bound[Time], pcmRange:Bound[A]):SoundSample[A]
    def length(file:File):Time
    def sampleIt[A](file:File, pcmRange:Bound[A]):Iterator[A]

    //-- nice interfaces
    /**simple file load assuming sensible defaults*/
    def load(file:File):SoundSample[Double] = {
      val pcmRange = Bound.of[Short].map{_.toDouble}  //default pcm scale as a double
      val timeSlice:Bound[Time] = Bound(0.s, length(file))  //default timeSlice to the whole file
      load(file, timeSlice, pcmRange)
    }

    //TODO remove the following
    // def load(file:File):Try[SoundSample[Double]] = load(file, Bound.of[Short].map{_.toDouble})
  }

  //TODO move to a jvm specific compile configuration to support future scala-js and scala-native configurations
  implicit object RenderJVM extends Render{
    import Implicit.ec  //TODO add parameters so alternative execution contexts can be utilized

    //--java sound api
    import javax.sound.sampled._ //{AudioSystem,AudioFormat,Mixer,AudioInputStream}

    private def load[A](f: => A):A = Loader.from(classOf[AudioSystem]){ f } //wrapper to make sure the AudioSystem class loader is used to find local resources https://stackoverflow.com/a/25083123/622016

    lazy val mixers:Vector[Mixer.Info] = load{AudioSystem.getMixerInfo()}.toVector

    override def toString = mixers.zipWithIndex.map{case (m,i) => s"$i. mixer info: "+m.getName}.mkString("\n")

    private def format(sampleRate:Frequency) =
      new AudioFormat(sampleRate.hz.toFloat, 16, 1, true, true) //sampleRate, sampleSizeInBits, channels, signed, bigEndian //TODO why does bigEndian=true work here when the file parsers below assume little endian?

    case class RecordingLine(name:String, line:TargetDataLine, formats:Array[AudioFormat]){
      override def toString = s"# $name\n" + formats.zipWithIndex.map{case (f,i) => s" $i) $f"}.mkString("\n")
      def sampleIt[A](sampleRate:Frequency, pcmBound:Bound[A]=Bound.of[Short]):Iterator[A] = { //TODO add a callback scheme
        // val fmt = new AudioFormat(8000, 8, 1, true, false) //8 bit variant kept for reference
        val fmt = new AudioFormat(sampleRate.hz.toFloat, 16, 1, true, false) //sampleRate, sampleSizeInBits, channels, signed, bigEndian //FIXME make these settings configurable
        line.open(fmt) //important place to incorporate the config
        line.start
        val ais = new AudioInputStream(line)
        val meta = new AisMeta(ais)
        RenderJVM.sampleIt(meta,pcmBound)
      }
      def close():Unit = {
        line.stop
        line.close
      }

      def canOpen:Boolean = {
        val fmt = format(8.k.hz) //a representative format, used only to check that the line can be opened
        val didOpen = Try{line.open(fmt)}.toOption.isDefined
        if(didOpen) Try{close()}
        didOpen
      }
    }
    def micLine:Option[RecordingLine] =
      recordingLines.find(_.name.toLowerCase contains "microphone")
                    // .filter(_.canOpen)

    def recordingLines:List[RecordingLine] = {
      load{AudioSystem.getMixerInfo}.flatMap{mixerInfo =>
         // println(s"# $mixerInfo")
         val name = mixerInfo.toString
         load{AudioSystem getMixer mixerInfo}.getTargetLineInfo.flatMap{lineInfo =>
            // println(s"  * $lineInfo")
            // val line = load{AudioSystem getLine lineInfo}.asInstanceOf[TargetDataLine]
            load{AudioSystem getLine lineInfo} match {
              case line:TargetDataLine =>
                val formats = line.getLineInfo.asInstanceOf[DataLine.Info].getFormats
                // for((format,i) <- formats.zipWithIndex) println(s"    $i) $format")
                Some(RecordingLine(name, line, formats))
              case _ =>
                // println("     note: is not a TargetDataLine")
                None
            }
         }
      }
    }.toList

    // def soundSource(sampleRate:Frequency):SoundSource = new DataLine(sampleRate)
    class RenderLine(val sampleRate:Frequency) extends SoundOutput {
      private lazy val line:javax.sound.sampled.SourceDataLine = {
        val f = format(sampleRate)
        val l = load{AudioSystem.getSourceDataLine(f)} //require the AudioSystem classloader
        l.open(f)
        l
      }
      def cursor:Time = Time(line.getMicrosecondPosition*1E-6)
      // private val bufferTime = 50.ms
      def play(s:SoundSample[_]):SoundOutput = {
        //--init
        stopFlag = false
        line.start

        //TODO include these implicits in the play arguments so the user can choose what contexts to use
        import Implicit.ec
        import Implicit.sc

        //--chunk parameters
        val chunkSize = 2.k //chunk size in bytes
        val bytesPerSample = 2 //assume pcm 16bit signed Short samples
        val dt = s.sampleRate.inv*chunkSize/bytesPerSample*0.5 //the 0.5 factor keeps the scheduling delay neither too short nor too long for smooth playback
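        //e.g. at 8.k.hz this schedules a write roughly every 60 ms: (1/8000 s)*2000/2*0.5 ≈ 62.5 ms (a sketch of the arithmetic, assuming 2.k is about 2000 bytes)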

        val bytes = pcmBytes(s)

        //--scheduled non-blocking futures to step through the logic and check for the stop flag
        def next(it:Iterator[Iterable[Byte]]):Future[Unit] = {
          // Log(dt, line.available, cursor, line.isActive) //use this to debug playback smoothness
          if(line.available == 0 || stopFlag || !it.hasNext) DrxFuture.unit //stop processing or reached end of data
          else {
            dt.delay{
              if(line.available > chunkSize/2) {
                val bs = it.next().toArray
                line.write(bs,0,bs.size)
              }
            } flatMap {_ => next(it)}
          }
        }

        //--launch the future
        val f = next(bytes grouped chunkSize)
        f.onComplete{t =>
          line.flush //remove dangling bits //FIXME needed to prevent looping clicks but not sure why
          // Log("completed", s.length, stopFlag, t)
          // line.stop
        }
        //--return this sound output controller
        //TODO possibly return the future that is moving through the chunks
        this
      }
      def pause:Unit = line.stop
      private var stopFlag = true
      def stop:Unit = {stopFlag = true; line.flush; line.stop}
      def close:Unit = {
        stop
        line.close
      }
      def isActive = line.isActive
    }
    /**play a generic Sound source*/
    def play(s:Sound):SoundOutput = {
      s match {
        case s:SoundSample[_] =>
          val line = new RenderLine(s.sampleRate)
          line.play(s)
        case s:SoundFile =>  ???
          // Input(ais).readBytes{ba => line.write(ba, 0, ba.size); {} }
          // ais.close //if not already autoClosed  
        case _ => ???
      }
    }//end of play

    def length(file:File):Time = {
      val meta = AisMeta(file)
      meta.ais.close
      meta.length
    }
    object AisMeta{
      def apply(file:File):AisMeta = new AisMeta(load(AudioSystem.getAudioInputStream(file)))
    }
    class AisMeta(val ais:AudioInputStream){
      //--ais
      val format = ais.getFormat
      val frameCount = ais.getFrameLength //number of frames
      //--format
      val rate:Frequency = format.getSampleRate.toDouble.hz
      val encoding = format.getEncoding
      val channels = format.getChannels
      val isBigEndian = format.isBigEndian
      val frameSize = format.getFrameSize
      //--composites 
      def sampleType = (encoding, channels, frameSize, isBigEndian) //--tuple
      val length:Time = rate.inv*frameCount

      def frameOf(t:Time):Long = (t*rate).toLong
    }
    private def sampleIt[A](meta:AisMeta, timeSlice:Bound[Time], pcmRange:Bound[A]):Iterator[A] = {
      //--calculate skips
      val bytesPerFrame = 2
      val iStart = meta.frameOf(timeSlice.min)*bytesPerFrame
      val iEnd = meta.frameOf(timeSlice.max)*bytesPerFrame
      val N = (iEnd - iStart).toInt/bytesPerFrame //number of frames to take after the skip

      //--do the jump
      val iJumped = meta.ais skip iStart
      if(iStart != iJumped) println(s"Warning: did not skip to $iStart but instead to $iJumped")

      sampleIt(meta, pcmRange).take(N)
    }
    private def sampleIt[A](meta:AisMeta, pcmRange:Bound[A]):Iterator[A] = {
      //--iterate bytes as doubles
      import AudioFormat.Encoding._
      val byteItSize = 128  //~ 5.k.hz * 2bytes/sample / 60fps
      val samples = meta.sampleType match {
        //-- 16bit, mono, signed little endian (wav files are little endian)
        case (PCM_SIGNED, 1, 2, false) =>
          val scale = Scale(Bound(-32768, 32767), pcmRange) //domain to pcmRange
          Input(meta.ais).byteIt(byteItSize).grouped(2).map{case Seq(a,b) => scale(
            (((b & 0xFF) << 8) | (a & 0xFF)).toShort.toInt //two's complement 16bit short
          )}
        //-- 16bit, mono, un-signed little endian
        case (PCM_UNSIGNED, 1, 2, false) =>
          val scale = Scale(Bound(0,65535), pcmRange) //unsigned 16bit domain to pcmRange
          Input(meta.ais).byteIt(byteItSize).grouped(2).map{case Seq(a,b) => scale(
            (((b & 0xFF) << 8) | (a & 0xFF)).toInt         //unsigned 16bit short is interpreted as 32bit int
          )}
        // Not implemented yet
        case sampleFormat => Console.err.println(s"no sample binary parser written in drx.Sound for format: $sampleFormat (try 16bit pcm signed/unsigned mono little endian)"); ???
      }
      //FIXME does the stream need to be closed here since it may be time sliced and not auto closed at the end??
      samples
    }
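
    /* Worked example (sketch) of the little endian 16 bit decode above:
     *   bytes a=0x34, b=0x12 give ((0x12 << 8) | 0x34) = 0x1234 = 4660
     *   bytes a=0xFF, b=0xFF give 0xFFFF, which .toShort reinterprets as -1 (two's complement)
     * the resulting Int is then rescaled by Scale into the requested pcmRange.
     */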

    def sampleIt[A](f:File, domain:Bound[A]):Iterator[A] = {
      val meta = AisMeta(f)
      sampleIt(meta, domain) //lazy load without memory
    }

    def load[A](f:File, timeSlice:Bound[Time], range:Bound[A]):SoundSample[A] = { //TODO why return the bound of double when the sampleDomain is internally represented
      val meta = AisMeta(f)
      val samples = sampleIt(meta, timeSlice, range).toVector //lazy load but memory backed //.toIterable //stored and close
      val dt = timeSlice.max - timeSlice.min
      SoundSample(samples, range, meta.rate, meta.channels, dt)
    }

    /**byte iterator: mono, linear pcm, signed, 16 bit, big endian*/
    private def pcmBytes[A](s:SoundSample[A]):Iterable[Byte] = {
      val scale = Scale(s.sampleDomain, Bound.of[Short])
      s.samples.flatMap{scale(_).toByteArray}
    }
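
    /* Example (sketch): pcmBytes rescales each sample from its sampleDomain into the signed 16 bit
     * Short range before serializing, so a Double sample of 1.0 in Bound(-1d,1d) maps near 32767
     * and -1.0 maps near -32768, matching the PCM_SIGNED format used by `format` above.
     */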

    /*
    private def pcmBytesOld[A](s:SoundSample[A]):Iterable[Byte] = {
        //--this whole trick of using a byte buffer is to use the java built endian conversion with putShort to byte orderings
        //--TODO try writing directly to an allocated array instead of double allocation work here
        val nBytes = s.sampleSize * 2 //2 Bytes in a 16bit Short
        val bb = java.nio.ByteBuffer.allocate(nBytes) //this is the whole buffer TODO maybe use chunks
        bb.order(java.nio.ByteOrder.BIG_ENDIAN)  //big endian //even with big endian encoding a wav file will get swapped back ???
        //bb.order(java.nio.ByteOrder.LITTLE_ENDIAN)  //little endian
        // Log(s.sampleDomain)
        val scale = Scale(s.sampleDomain, Bound.of[Short])
        for(v <- s.samples) bb.putShort{
          if(s.sampleDomain contains v) scale(v) else 0 //zero out max pressures values to no spike a speaker
        }
        bb.flip()
        val buffer = new Array[Byte](nBytes)
        bb.get(buffer)
        buffer
    }
    */

    /**save the sound to a file through the java sound API (currently always written as WAVE)*/
    def save(s:Sound, file:File):Unit = {
      //--make an audio stream from bytes
      val ais:AudioInputStream = s match {
        case s:SoundSample[_] =>
          // Log(s.length, s.sampleRate, s.sampleSize, file) //FIXME remove this debug line
          //-- FIXME [iterable -> byteArray -> is] works, so why doesn't the direct [iterable -> is] work?
          //   Input(pcmBytes(s)).is          //Bad: breaks saving
          //   Input(pcmBytes(s).toArray).is  //Good, but fills ram with the byte stream
          val bytes = pcmBytes(s).toArray
          val sampleSize = bytes.size/2 //sampleSize computed from the constructed byte array since s.sampleSize may be inefficient
          val is = Input(bytes).is //good, but fills ram with the byte stream
          new AudioInputStream(is, format(s.sampleRate), sampleSize)
        case s:SoundFile =>  ???//FIXME add loading file and autolookup the format type
        case _ => ???
      }
      //--write the stream
      //--lookup filetype
      val fileType = file.ext match {
        case "wav" => AudioFileFormat.Type.WAVE
        case _ => AudioFileFormat.Type.WAVE  //TODO implement other filetype lookups
      }
      load{AudioSystem.write(ais, fileType, file.file)}  //the AudioSystem class loader is required
      //--return alternate encoding
      //TODO add conversion to mp3
      // val base = file.base
      () //explicitly return a unit
    }

  } // End of the RenderJVM object

}