Temporary commit

This commit is contained in:
parent 738b39e9a0
commit 885dcbd044

0	app/CMakeLists.txt	new file (empty)
@@ -16,6 +16,13 @@ android {
         versionName "1.0.0"
         multiDexEnabled true
         testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
+
+        externalNativeBuild {
+            cmake {
+                cppFlags ""
+            }
+        }
+
         ndk {
             // Only list the ABIs that the bundled third-party .so libraries actually ship
             abiFilters 'armeabi-v7a', 'arm64-v8a'
@@ -76,7 +83,7 @@ android {
         enable = true
     }

-    ndkVersion '29.0.13846066 rc3'
+    ndkVersion '27.0.12077973'

     applicationVariants.configureEach { variant ->
         if (variant.buildType.name != 'release') return
@@ -88,6 +95,18 @@ android {
         }
     }

+    sourceSets {
+        main {
+            jniLibs.srcDirs = ['src/main/libs']
+        }
+    }
+
+    externalNativeBuild {
+        cmake {
+            path "CMakeLists.txt"
+        }
+    }
+
 }

 protobuf {
@@ -162,4 +181,5 @@ dependencies {
     implementation libs.androidautosize

     implementation files('libs/sherpa19.aar')
+
 }
14	app/proguard-rules.pro	(vendored)
@@ -277,4 +277,16 @@
 -keepclassmembers class org.scilab.forge.jlatexmath.** {
     <fields>;
     <methods>;
 }
+
+-keepclassmembers class android.media.AudioManager {
+    int maxActivePaths;
+}
+-keep class com.k2fsa.sherpa.onnx.** { *; }
+-keepclassmembers class com.k2fsa.sherpa.onnx.** {
+    native <methods>;
+    <init>(...);
+}
+-keep interface com.k2fsa.sherpa.onnx.** { *; }
+
+
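Note: the new keep rules protect the sherpa-onnx JNI bindings. Classes that declare native methods are resolved from C++ by name, so R8 must not rename or strip them, and their constructors may be instantiated from native code. For illustration only (the class, method, and library names below are hypothetical, not the actual sherpa-onnx API), this is the kind of binding that `-keep ... { native <methods>; <init>(...); }` is guarding:

// Hypothetical JNI binding, for illustration only.
// Without the keep rules, R8 could rename or remove the class, the native
// methods, and the constructor that native code looks up by signature.
package com.example.nativedemo

class HypotheticalRecognizer(modelPath: String) {
    private val handle: Long = create(modelPath)   // called from Kotlin, implemented in C++

    // Resolved by the JVM against a C++ symbol; the exact name must survive shrinking.
    private external fun create(modelPath: String): Long
    external fun decode(handle: Long, samples: FloatArray): String

    companion object {
        init { System.loadLibrary("hypothetical-native") }
    }
}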
@@ -52,7 +52,7 @@
         tools:targetApi="31">

         <activity
-            android:name="com.zs.smarthuman.ui.SplashActivity"
+            android:name=".ui.MainActivity"
             android:exported="true"
             android:theme="@style/Theme.Splash"
             android:screenOrientation="portrait">
@@ -66,10 +66,10 @@
             </intent-filter>
         </activity>

-        <activity
+        <!-- <activity
             android:name="com.zs.smarthuman.ui.MainActivity"
             android:screenOrientation="portrait"/>
+        -->
         <activity
             android:name="com.zs.smarthuman.ui.ActivateActivity"
             android:screenOrientation="portrait"/>
BIN	app/src/main/assets/ten-vad.onnx	(new file)
Binary file not shown.
@@ -14,6 +14,6 @@ class ApiService {
         const val GET_USER_INFO_URL = "iot/info/getUserInfo"


-        const val UPLOAD_RECORD_VOICE_URL = "/iot/chat/test"
+        const val UPLOAD_RECORD_VOICE_URL = "/iot/chat"
     }
 }
@@ -16,7 +16,13 @@ public enum MessageContentType {
     /**
      * Receive backend audio response command
      */
-    RECEIVE_VOICE_STREAM(2);
+    RECEIVE_VOICE_STREAM(2),


+    /**
+     * Update user info
+     */
+    UPDATE_INFO(3);
+
+
     private int msgContentType;
@@ -2,179 +2,190 @@ package com.zs.smarthuman.sherpa

 import android.content.res.AssetManager
 import android.util.Log
-import kotlinx.coroutines.*
+import kotlin.math.sqrt

 class VoiceController(
     assetManager: AssetManager,
     private val onWakeup: () -> Unit,
     private val onFinalAudio: (FloatArray) -> Unit,
     private val idleTimeoutSeconds: Int = 15,
-    private val maxRecordingSeconds: Int = 10, // ✅ maximum recording duration
+    private val maxRecordingSeconds: Int = 10,
     private val onStateChanged: ((VoiceState) -> Unit)? = null,
     private val stopBackendAudio: (() -> Unit)? = null
 ) {

     private val TAG = "VoiceController"
+    private val sampleRate = 16000

     /* ================= State ================= */

     private var state: VoiceState = VoiceState.WAIT_WAKEUP
         set(value) {
             field = value
+            Log.d(TAG, "➡ State = $value")
             onStateChanged?.invoke(value)
-            Log.d(TAG, "当前状态: $value")
         }

-    var isPlaying = false
-        private set

     /* ================= Wakeup ================= */

-    private val WAKEUP_DISCARD_MS = 600L
-    private val WAKEUP_COOLDOWN_MS = 1500L
-    private var wakeupDiscardUntil = 0L
-    private var lastWakeupTime = 0L

     private val wakeupManager = WakeupManager(assetManager) {
-        val now = System.currentTimeMillis()
-        if (now - lastWakeupTime < WAKEUP_COOLDOWN_MS) {
-            Log.d(TAG, "⚠️ 唤醒过于频繁,忽略")
-            return@WakeupManager
-        }
-        lastWakeupTime = now
-
-        Log.d(TAG, "🔥 唤醒触发")
+        Log.d(TAG, "🔥 WakeWord detected")

         stopBackendAudio?.invoke()
-        isPlaying = false
-        audioBuffer.clear()
-        preBuffer.clear()
-        vadManager.reset()
-        vadStarted = false
-        vadEndPending = false
-
-        wakeupDiscardUntil = now + WAKEUP_DISCARD_MS
+        resetAll()
+        state = VoiceState.PLAYING_PROMPT

         onWakeup()
-        playLocalPrompt()
     }

-    /* ================= VAD ================= */
+    /* ================= VAD (START only) ================= */

     private val vadManager = VadManager(
         assetManager,
-        onSpeechStart = { onVadSpeechStart() },
-        onSpeechEnd = { onVadSpeechEnd() }
+        onSpeechStart = { onVadStart() },
+        onSpeechEnd = { /* ❌ no longer used to end a sentence */ }
     )

     /* ================= Audio buffers ================= */

     private val audioBuffer = mutableListOf<Float>()
     private val preBuffer = ArrayDeque<Float>()
-    private val PRE_BUFFER_SIZE = 16000
+    private val PRE_BUFFER_SIZE = sampleRate / 2 // 500ms

+    /* ================= Timing ================= */
     private var idleTimer = 0L
+    private var recordingStartTime = 0L
     private var vadStarted = false
-    private var vadEndPending = false
-    private var vadEndTime = 0L
-    private val END_SILENCE_MS = 1000L

-    /* ================= External audio input ================= */
-    private var recordingStartTime = 0L // ✅ records when recording started
+    /* ================= RMS end-of-speech detection ================= */
+    private var silenceStartMs = 0L
+    private val SILENCE_END_MS = 1200L // how long silence must last to end a sentence
+    private val RMS_SILENCE_THRESHOLD = 0.01f // silence energy threshold
+    private val MIN_SPEECH_DURATION_MS = 800L // shortest valid utterance

+    /* ================= Audio entry point ================= */
     fun acceptAudio(samples: FloatArray) {
-        cachePreBuffer(samples)
         wakeupManager.acceptAudio(samples)

+        if (state == VoiceState.UPLOADING ||
+            state == VoiceState.PLAYING_PROMPT ||
+            state == VoiceState.PLAYING_BACKEND
+        ) return
+
+        if (state == VoiceState.WAIT_SPEECH) {
+            cachePreBuffer(samples)
+            vadManager.accept(samples)
+            return
+        }
+
+        if (state != VoiceState.RECORDING) return
+
+        // ===== RECORDING =====
+        audioBuffer.addAll(samples.asList())
+        vadManager.accept(samples)

         val now = System.currentTimeMillis()
-        if (now < wakeupDiscardUntil) return

-        when (state) {
-            VoiceState.WAIT_SPEECH -> {
-                vadManager.accept(samples)
-            }
+        // 1️⃣ Max-recording fallback
+        if (now - recordingStartTime >= maxRecordingSeconds * 1000) {
+            Log.w(TAG, "⏱ Max recording reached")
+            finishSentence()
+            return
+        }

-            VoiceState.RECORDING -> {
-                audioBuffer.addAll(samples.asList())
-                vadManager.accept(samples)
-                idleTimer = now
-                // ✅ max-recording-duration check
-                if (now - recordingStartTime >= maxRecordingSeconds * 1000) {
-                    Log.d(TAG, "⚠️ 达到最大录音时长,自动结束录音")
-                    finishSentence()
-                    return
-                }
-
-                if (vadEndPending && now - vadEndTime >= END_SILENCE_MS) {
-                    finishSentence()
-                }
-            }
-
-            else -> Unit
+        // 2️⃣ RMS silence end (core logic)
+        val rms = calcRms(samples)
+        // Log.d(TAG, "RMS_DEBUG", "rms=${"%.4f".format(rms)}")
+
+        if (rms < RMS_SILENCE_THRESHOLD) {
+            if (silenceStartMs == 0L) {
+                silenceStartMs = now
+            } else if (now - silenceStartMs >= SILENCE_END_MS) {
+                Log.d(TAG, "🔇 RMS silence end")
+                finishSentence()
+            }
+        } else {
+            silenceStartMs = 0L
         }
     }

-    /* ================= Prompt audio ================= */
+    /* ================= VAD START ================= */
+    private fun onVadStart() {
+        if (state != VoiceState.WAIT_SPEECH) return

-    private val PROMPT_DURATION_MS = 3000L
-    private var promptFallbackJob: Job? = null
+        Log.d(TAG, "🎤 VAD START")
+        vadStarted = true
+        state = VoiceState.RECORDING
+        recordingStartTime = System.currentTimeMillis()
+        silenceStartMs = 0L

-    fun onPlayStartPrompt() {
-        if (state == VoiceState.PLAYING_PROMPT) return
-        isPlaying = true
-        state = VoiceState.PLAYING_PROMPT
+        audioBuffer.addAll(preBuffer)
+        preBuffer.clear()
+    }

-        promptFallbackJob?.cancel()
-        promptFallbackJob = CoroutineScope(Dispatchers.Main).launch {
-            delay(PROMPT_DURATION_MS)
-            if (state == VoiceState.PLAYING_PROMPT) {
-                Log.w(TAG, "⚠️ 提示音 complete 丢失,fallback")
-                onPlayEndPrompt()
-            }
+    /* ================= Finish recording ================= */
+    private fun finishSentence() {
+        val speakTime = System.currentTimeMillis() - recordingStartTime
+
+        if (!vadStarted || speakTime < MIN_SPEECH_DURATION_MS) {
+            Log.d(TAG, "⛔ Speech too short, ignore")
+            resetToWaitSpeech()
+            return
         }
+
+        val finalAudio = audioBuffer.toFloatArray()
+        audioBuffer.clear()
+
+        state = VoiceState.UPLOADING
+        Log.d(TAG, "⬆ Upload audio len=${finalAudio.size}")
+        onFinalAudio(finalAudio)
+    }
+
+    /* ================= Playback callbacks ================= */
+    fun onPlayStartPrompt() {
+        state = VoiceState.PLAYING_PROMPT
     }

     fun onPlayEndPrompt() {
-        promptFallbackJob?.cancel()
-        if (state != VoiceState.PLAYING_PROMPT) return
-        isPlaying = false
         state = VoiceState.WAIT_SPEECH
         idleTimer = System.currentTimeMillis()
-        Log.d(TAG, "提示音结束 → WAIT_SPEECH")
     }

-    /* ================= Backend ================= */

     fun onPlayStartBackend() {
-        isPlaying = true
         state = VoiceState.PLAYING_BACKEND
     }

     fun onPlayEndBackend() {
-        if (state != VoiceState.PLAYING_BACKEND) return
-        isPlaying = false
-        state = VoiceState.WAIT_WAKEUP
+        state = VoiceState.WAIT_SPEECH
+        idleTimer = System.currentTimeMillis()
+    }
+
+    /* ================= Upload callback ================= */
+    fun onUploadFinished(success: Boolean) {
+        if (state != VoiceState.UPLOADING) return
+        state = if (success) VoiceState.PLAYING_BACKEND else VoiceState.WAIT_SPEECH
     }

     /* ================= Idle ================= */

     fun checkIdleTimeout() {
         if (state != VoiceState.WAIT_SPEECH) return
         if (System.currentTimeMillis() - idleTimer > idleTimeoutSeconds * 1000) {
-            reset()
+            resetAll()
         }
     }

-    fun reset() {
-        state = VoiceState.WAIT_WAKEUP
+    /* ================= Reset ================= */
+    private fun resetToWaitSpeech() {
         audioBuffer.clear()
         preBuffer.clear()
         vadManager.reset()
         vadStarted = false
-        vadEndPending = false
-        wakeupDiscardUntil = 0L
-        recordingStartTime = 0L // ✅ reset recording start time
-        Log.d(TAG, "reset → WAIT_WAKEUP")
+        silenceStartMs = 0L
+        state = VoiceState.WAIT_SPEECH
+        idleTimer = System.currentTimeMillis()
+    }
+
+    private fun resetAll() {
+        audioBuffer.clear()
+        preBuffer.clear()
+        vadManager.reset()
+        vadStarted = false
+        silenceStartMs = 0L
+        state = VoiceState.WAIT_WAKEUP
     }

     fun release() {
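Note: in the new flow, audio arriving while the controller is in WAIT_SPEECH goes into a ~500 ms pre-roll buffer and the VAD; when the VAD fires, the pre-roll is prepended to the recording so the first syllable is not lost, and from then on the end of the utterance is decided by RMS silence rather than by VAD end events. A minimal standalone sketch of the pre-roll idea (illustrative only, not part of VoiceController):

// Minimal pre-roll ring buffer sketch, assuming 16 kHz mono float samples.
class PreRollBuffer(private val capacity: Int = 16000 / 2) {
    private val buffer = ArrayDeque<Float>()

    fun push(samples: FloatArray) {
        for (s in samples) {
            buffer.addLast(s)
            if (buffer.size > capacity) buffer.removeFirst()  // keep only the newest ~500 ms
        }
    }

    // Called once speech is detected: prepend the pre-roll so the onset is kept.
    fun drainInto(target: MutableList<Float>) {
        target.addAll(buffer)
        buffer.clear()
    }
}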
@@ -182,84 +193,7 @@ class VoiceController(
         wakeupManager.release()
     }

-    /* ================= VAD callbacks ================= */
-
-    private fun onVadSpeechStart() {
-        if (state != VoiceState.WAIT_SPEECH) return
-        vadStarted = true
-        state = VoiceState.RECORDING
-        audioBuffer.addAll(preBuffer)
-        idleTimer = System.currentTimeMillis()
-        recordingStartTime = System.currentTimeMillis() // ✅ record when recording started
-        Log.d(TAG, "VAD开始 → RECORDING")
-    }
-
-    private fun onVadSpeechEnd() {
-        if (state != VoiceState.RECORDING) return
-        vadEndPending = true
-        vadEndTime = System.currentTimeMillis()
-    }
-
-    /* ================= End of recording & validation ================= */
-
-    private fun finishSentence() {
-        vadEndPending = false
-        state = VoiceState.WAIT_WAKEUP
-
-        val finalAudio = audioBuffer.toFloatArray()
-        audioBuffer.clear()
-
-        if (isValidUserSpeech(finalAudio)) {
-            onFinalAudio(finalAudio)
-            Log.d(TAG, "✅ 录音有效,上传")
-        } else {
-            Log.d(TAG, "❌ 噪音/旁人语音,丢弃")
-        }
-    }
-
-    /* ================= Key validation function ================= */
-
-    private fun isValidUserSpeech(audio: FloatArray): Boolean {
-        if (!vadStarted) {
-            Log.d(TAG, "❌ VAD 未触发")
-            return false
-        }
-
-        // 1️⃣ Duration: >= 600 ms (very lenient)
-        val durationMs = audio.size * 1000f / 16000f
-        if (durationMs < 600f) {
-            Log.d(TAG, "❌ 太短: ${durationMs}ms")
-            return false
-        }
-
-        // 2️⃣ Compute RMS (very low on real devices)
-        var sum = 0f
-        var peak = 0f
-        for (v in audio) {
-            val a = kotlin.math.abs(v)
-            sum += a * a
-            if (a > peak) peak = a
-        }
-        val rms = kotlin.math.sqrt(sum / audio.size)
-
-        Log.d(TAG, "🎤 RMS=$rms peak=$peak duration=${durationMs}ms")
-
-        // 3️⃣ Only reject pure background noise
-        // Measured: the ambient noise floor RMS is usually < 0.001
-        if (rms < 0.002f && peak < 0.01f) {
-            Log.d(TAG, "❌ 纯环境噪声,丢弃")
-            return false
-        }
-
-        // ✅ Anything that is not pure noise is treated as speech
-        Log.d(TAG, "✅ 判定为有效人声")
-        return true
-    }
-
-
-
     /* ================= Utilities ================= */

     private fun cachePreBuffer(samples: FloatArray) {
         for (s in samples) {
             preBuffer.addLast(s)
@@ -269,7 +203,11 @@ class VoiceController(
         }
     }

-    private fun playLocalPrompt() {
-        onPlayStartPrompt()
+    private fun calcRms(audio: FloatArray): Float {
+        var sum = 0f
+        for (v in audio) {
+            sum += v * v
+        }
+        return sqrt(sum / audio.size)
     }
 }
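Note: the new end-of-utterance rule is purely energy-based. For each incoming frame the controller computes RMS = sqrt((1/N) * sum(x_i^2)); once frames stay below RMS_SILENCE_THRESHOLD (0.01) for SILENCE_END_MS (1200 ms), finishSentence() fires. A standalone sanity check of that threshold using the same formula (illustrative values only):

// Quick check of the RMS silence threshold on synthetic frames.
import kotlin.math.sin
import kotlin.math.sqrt

fun rms(frame: FloatArray): Float {
    var sum = 0f
    for (v in frame) sum += v * v
    return sqrt(sum / frame.size)
}

fun main() {
    val silence = FloatArray(512)                                  // all zeros -> rms = 0.0
    val tone = FloatArray(512) { 0.1f * sin(it * 0.2).toFloat() }  // rms ~ 0.07
    println(rms(silence) < 0.01f)  // true  -> counts toward the 1200 ms silence window
    println(rms(tone) < 0.01f)     // false -> resets silenceStartMs
}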
@@ -10,5 +10,6 @@ enum class VoiceState {
     PLAYING_PROMPT,  // playing the local prompt audio
     WAIT_SPEECH,     // waiting for the user to speak
     RECORDING,       // the user is speaking
+    UPLOADING,       // audio upload in progress
     PLAYING_BACKEND  // playing audio returned by the backend
 }
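Note: with UPLOADING added, the happy path implied by the VoiceController changes in this commit runs wake word → prompt → listen → record → upload → backend playback → listen again. A short reading aid (not code from the repository):

// Happy-path state order as implied by VoiceController in this commit.
val happyPath = listOf(
    "WAIT_WAKEUP      -- wake word fires: resetAll(), state = PLAYING_PROMPT",
    "PLAYING_PROMPT   -- onPlayEndPrompt(): state = WAIT_SPEECH",
    "WAIT_SPEECH      -- onVadStart(): state = RECORDING",
    "RECORDING        -- finishSentence() on RMS silence or max duration: state = UPLOADING",
    "UPLOADING        -- onUploadFinished(true): state = PLAYING_BACKEND",
    "PLAYING_BACKEND  -- onPlayEndBackend(): back to WAIT_SPEECH",
)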
@@ -5,6 +5,7 @@ import android.graphics.drawable.AnimationDrawable
 import android.graphics.drawable.Drawable
 import android.view.View
 import android.view.animation.LinearInterpolator
+import androidx.lifecycle.lifecycleScope
 import com.bumptech.glide.Glide
 import com.bumptech.glide.request.target.ImageViewTarget
 import com.bumptech.glide.request.transition.Transition
@@ -14,8 +15,12 @@ import com.zs.smarthuman.bean.NetworkStatusEventMsg
 import com.zs.smarthuman.bean.QRCodeResp
 import com.zs.smarthuman.databinding.ActivityActivateBinding
 import com.zs.smarthuman.http.ApiResult
+import com.zs.smarthuman.im.chat.MessageContentType
 import com.zs.smarthuman.im.chat.bean.SingleMessage
+import com.zs.smarthuman.kt.startActivity
 import com.zs.smarthuman.viewmodel.ActivateViewModel
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.launch

 /**
  * @description: QR-code activation page
@@ -90,7 +95,15 @@ class ActivateActivity : BaseViewModelActivity<ActivityActivateBinding, Activate
     }

     override fun receivedIMMsg(msg: SingleMessage) {
+        when (msg.msgContentType) {
+            MessageContentType.ACTIVE_SUCCESS_ENTER_MAIN.msgContentType -> {
+                lifecycleScope.launch(Dispatchers.Main) {
+                    startActivity(MainActivity::class)
+                    overridePendingTransition(0, 0)
+                    finish()
+                }
+            }
+        }
     }

     override fun onMessageEvent(event: Any) {
@@ -9,6 +9,7 @@ import android.content.pm.PackageManager
 import android.media.AudioFormat
 import android.media.AudioRecord
 import android.media.MediaRecorder
+import android.media.audiofx.AcousticEchoCanceler
 import android.os.Bundle
 import android.os.Environment
 import android.os.IBinder
@@ -95,6 +96,7 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
         PermissionUtils.permissionGroup(PermissionConstants.MICROPHONE)
             .callback(object : PermissionUtils.FullCallback {
                 override fun onGranted(granted: List<String?>) {
+                    initVoiceController()
                     initMicrophone()
                     startRecording()
                 }
@@ -109,8 +111,6 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
             }).request()


-        initVoiceController()
-
         requestUserInfo()
         initObserver()
     }
@@ -158,7 +158,7 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
                 voiceInfo = mutableListOf<VoiceBeanResp>().apply {
                     add(
                         VoiceBeanResp(
-                            audioUrl = UserInfoManager.userInfo?.wakeUpAudioUrl ?: ""
+                            audioUrl = /*UserInfoManager.userInfo?.wakeUpAudioUrl ?:*/ "https://static.seerteach.net/aidialogue/systemVoice/aliyun-nv.mp3"
                         )
                     )
                 }
@@ -166,10 +166,11 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
             },
             onFinalAudio = { audio ->
                 Log.d("lrs", "检测到语音,长度=${audio.size}")
-                mViewModel?.uploadVoice(
-                    AudioPcmUtil.pcm16ToBase64(AudioPcmUtil.floatToPcm16(audio)),
-                    1
-                )
+//                mViewModel?.uploadVoice(
+//                    AudioPcmUtil.pcm16ToBase64(AudioPcmUtil.floatToPcm16(audio)),
+//                    1
+//                )
+                loadLocalJsonAndPlay()
                 val file = File(
                     getExternalFilesDir(Environment.DIRECTORY_DOWNLOADS)!!.getAbsolutePath(),
                     "xxx.wav"
@@ -192,6 +193,7 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
                     VoiceState.RECORDING -> Log.d("lrs", "当前状态: 正在录音")
                     VoiceState.PLAYING_PROMPT -> Log.d("lrs", "当前状态: 播放本地音频")
                     VoiceState.PLAYING_BACKEND -> Log.d("lrs", "当前状态: 播放后台音频")
+                    else -> {}
                 }
             },

@@ -255,26 +257,38 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
         if (audioRecord?.state != AudioRecord.STATE_INITIALIZED) {
             Log.e("VoiceService", "Failed to initialize AudioRecord")
         }
+        enableSystemAec(audioRecord!!)
     }

+    private var aec: AcousticEchoCanceler? = null
+
+    private fun enableSystemAec(record: AudioRecord) {
+        if (!AcousticEchoCanceler.isAvailable()) {
+            Log.w("VoiceService", "System AEC not available")
+            return
+        }
+
+        aec = AcousticEchoCanceler.create(record.audioSessionId)
+        aec?.enabled = true
+
+        Log.d("VoiceService", "✅ System AEC enabled")
+    }
+
     // start recording
     fun startRecording() {
         isRecording = true
         audioRecord?.startRecording()

         lifecycleScope.launch(Dispatchers.IO) {
-            val buf = ShortArray(640)
+            val buf = ShortArray(512)
             while (isRecording) {
                 val n = audioRecord?.read(buf, 0, buf.size) ?: 0
                 if (n > 0) {
                     val raw = FloatArray(n) { buf[it] / 32768f }
-
-                    // duck the mic while playback is running
-                    val ducked = if (voiceController?.isPlaying == true) {
-                        FloatArray(n) { raw[it] * 0.4f }
-                    } else raw
-
-                    voiceController?.acceptAudio(agc(ducked))
+                    voiceController?.acceptAudio(raw)
                 }
                 voiceController?.checkIdleTimeout()
             }
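Note: the hunk above attaches the platform AcousticEchoCanceler to the AudioRecord session so that speaker playback is less likely to retrigger the wake word or leak into recordings. The effect holds a native resource and the diff shows no matching release, so a teardown along these lines would typically accompany it, e.g. from the activity's onDestroy() (a sketch, not part of the commit):

// Hedged sketch: release the echo canceller together with the recorder.
import android.media.AudioRecord
import android.media.audiofx.AcousticEchoCanceler

fun releaseCaptureResources(aec: AcousticEchoCanceler?, record: AudioRecord?) {
    aec?.release()      // frees the native effect attached to the audio session
    record?.release()   // releases the recorder itself
}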
@@ -291,23 +305,6 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
             }
         }

-    private fun agc(
-        input: FloatArray,
-        target: Float = 0.035f, // ⬅️ lowered
-        maxGain: Float = 4f // ⬅️ key parameter
-    ): FloatArray {
-        var sum = 0f
-        for (v in input) sum += v * v
-        val rms = sqrt(sum / input.size)
-        if (rms < 1e-6) return input
-
-        val gain = (target / rms).coerceAtMost(maxGain)
-        return FloatArray(input.size) {
-            (input[it] * gain).coerceIn(-1f, 1f)
-        }
-    }
-
-
     fun onUnityResourcesLoaded(message: String) { // Unity calls this once its resources finish loading
         lifecycleScope.launch(Dispatchers.Main) {
@@ -315,7 +312,9 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
             binding.flDigitalHuman.translationY = 0f
         }
     }

     private var promptPlaying = false
+    private var backPlaying = false
     fun onAudioProgressUpdated( // Unity calls this to report audio playback progress
         progress: Float,
         state: Int, // 0 stop, 2 pause, 1 play, 3 complete, 4 loading, 5 error
@@ -323,9 +322,9 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
         word: String,
         audioUrl: String
     ) {
-        val wakeupUrl = UserInfoManager.userInfo?.wakeUpAudioUrl ?: return
-        if (audioUrl != wakeupUrl) return
+//        val wakeupUrl = UserInfoManager.userInfo?.wakeUpAudioUrl ?: return
+//
+//        if (audioUrl != wakeupUrl) return

         when (state) {
             1 -> { // play
@@ -337,7 +336,6 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()

             3 -> { // complete
                 if (promptPlaying) {
-                    Toaster.showShort("借宿了")
                     promptPlaying = false
                     voiceController?.onPlayEndPrompt()
                 }
@@ -352,11 +350,41 @@ class MainActivity : BaseViewModelActivity<ActivityMainBinding, MainViewModel>()
         text: String,
         isFinal: Boolean
     ) {
-        if (state == 1) {
-            voiceController?.onPlayStartBackend()
-        }
-        if (state == 3) {
-            voiceController?.onPlayEndBackend()
-        }
+        when (state) {
+            1 -> { // play
+                if (!backPlaying) {
+                    backPlaying = true
+                    voiceController?.onPlayStartBackend()
+                }
+            }
+
+            3 -> { // complete
+                if (backPlaying) {
+                    Toaster.showShort("借宿了")
+                    backPlaying = false
+                    voiceController?.onPlayEndBackend()
+                }
+            }
+        }
+    }
+
+    private fun loadLocalJsonAndPlay() {
+        lifecycleScope.launch(Dispatchers.IO) {
+            try {
+                val jsonStr = assets
+                    .open("readEnd.json")
+                    .bufferedReader()
+                    .use { it.readText() }
+
+                UnityPlayerHolder
+                    .getInstance()
+                    .startTalking(jsonStr)
+
+            } catch (e: Exception) {
+                e.printStackTrace()
+                LogUtils.eTag("lrs", "loadLocalJsonAndPlay error: ${e.message}")
+            }
+        }
     }

@@ -0,0 +1,181 @@
+package com.zs.smarthuman.utils
+
+import android.annotation.SuppressLint
+import android.media.AudioFormat
+import android.media.AudioManager
+import android.media.AudioRecord
+import android.media.AudioTrack
+import android.media.MediaRecorder
+import android.os.Environment
+import android.util.Base64
+import android.util.Log
+import com.blankj.utilcode.util.LogUtils
+import com.zs.smarthuman.App
+import com.zs.smarthuman.sherpa.VoiceController
+import com.zs.smarthuman.utils.AudioDebugUtil
+import java.io.File
+import java.nio.ByteBuffer
+import java.nio.ByteOrder
+import java.util.concurrent.ArrayBlockingQueue
+import kotlin.concurrent.thread
+
+class PcmAudioWithAecManager(
+    private val voiceController: VoiceController
+) {
+    companion object {
+        private const val TAG = "PcmAudioWithAecManager"
+    }
+
+    private val sampleRate = 16000
+    private val channelConfig = AudioFormat.CHANNEL_IN_MONO
+    private val audioFormat = AudioFormat.ENCODING_PCM_16BIT
+    private val bufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat)
+    private var audioRecord: AudioRecord? = null
+    private var audioTrack: AudioTrack? = null
+    private val playQueue = ArrayBlockingQueue<ShortArray>(200)
+
+    @Volatile
+    var isRecording = false
+
+    @Volatile
+    private var isPlaying = false
+
+    private val rawBuffer = mutableListOf<Float>()
+    private val aecBuffer = mutableListOf<Float>()
+
+    init {
+        initAudioRecord()
+        initAudioTrack()
+    }
+
+    @SuppressLint("MissingPermission")
+    private fun initAudioRecord() {
+        audioRecord = AudioRecord(
+            MediaRecorder.AudioSource.VOICE_COMMUNICATION,
+            sampleRate,
+            channelConfig,
+            audioFormat,
+            bufferSize
+        )
+    }
+
+    private fun initAudioTrack() {
+        val trackBufferSize = AudioTrack.getMinBufferSize(
+            sampleRate,
+            AudioFormat.CHANNEL_OUT_MONO,
+            audioFormat
+        )
+
+        audioTrack = AudioTrack(
+            AudioManager.STREAM_MUSIC,
+            sampleRate,
+            AudioFormat.CHANNEL_OUT_MONO,
+            audioFormat,
+            trackBufferSize,
+            AudioTrack.MODE_STREAM
+        )
+    }
+
+    /** Start recording and process the captured audio */
+    fun startRecording() {
+        if (isRecording) return
+        isRecording = true
+        audioRecord?.startRecording()
+
+        thread {
+            val buffer = ShortArray(bufferSize / 2)
+            while (isRecording) {
+                val read = audioRecord?.read(buffer, 0, buffer.size) ?: 0
+                if (read > 0) {
+                    // process the captured frame
+                    processCapture(buffer.copyOf(read))
+                }
+                voiceController.checkIdleTimeout()
+            }
+        }
+    }
+
+    /** Stop recording and (optionally) save WAV debug dumps */
+    fun stopRecording() {
+        isRecording = false
+        audioRecord?.stop()
+//        Log.d(TAG, "rawBuffer:${rawBuffer.size},${aecBuffer.size}")
+//        if (rawBuffer.isNotEmpty()) {
+//            val rawFile = File(
+//                App.getInstance().getExternalFilesDir(Environment.DIRECTORY_DOWNLOADS)!!.absolutePath,
+//                "raw.wav"
+//            )
+//            AudioDebugUtil.saveFloatPcmAsWav(rawBuffer.toFloatArray(), rawFile)
+//            Log.d(TAG, "RAW WAV saved: ${rawFile.absolutePath}")
+//            rawBuffer.clear()
+//        }
+//
+//        if (aecBuffer.isNotEmpty()) {
+//            val aecFile = File(
+//                App.getInstance().getExternalFilesDir(Environment.DIRECTORY_DOWNLOADS)!!.absolutePath,
+//                "aec_clean.wav"
+//            )
+//            AudioDebugUtil.saveFloatPcmAsWav(aecBuffer.toFloatArray(), aecFile)
+//            Log.d(TAG, "AEC WAV saved: ${aecFile.absolutePath}")
+//            aecBuffer.clear()
+//        }
+    }
+
+    /** Start draining the playback queue */
+    fun startPlaying() {
+        if (isPlaying) return
+        isPlaying = true
+        audioTrack?.play()
+
+        thread {
+            while (isPlaying) {
+                try {
+                    val buffer = playQueue.take()
+                    // keep recording while playback is running
+                    audioTrack?.write(buffer, 0, buffer.size)
+                } catch (e: InterruptedException) {
+                    e.printStackTrace()
+                }
+            }
+        }
+    }
+
+    /** Stop playback */
+    fun stopPlaying() {
+        isPlaying = false
+        audioTrack?.stop()
+    }
+
+    /** Process a captured frame */
+    private fun processCapture(buffer: ShortArray) {
+        // keep the raw PCM
+        val floatRaw = FloatArray(buffer.size) { buffer[it] / 32768f }
+        rawBuffer.addAll(floatRaw.toList())
+
+        // forward to VoiceController for wake-word detection
+        voiceController.acceptAudio(floatRaw)
+    }
+
+    /** Push a frame into the playback queue */
+    private fun pushPlayBuffer(buffer: ShortArray) {
+        if (!playQueue.offer(buffer)) {
+            Log.w(TAG, "playQueue 满了,丢弃一帧")
+        }
+    }
+
+    /** Play backend Base64-encoded PCM */
+    fun playBackendAudio(base64: String) {
+        val bytes = Base64.decode(base64, Base64.DEFAULT)
+        val shortBuffer = ShortArray(bytes.size / 2)
+        ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shortBuffer)
+        pushPlayBuffer(shortBuffer)
+    }
+
+    /** Release resources */
+    fun release() {
+        stopRecording()
+        stopPlaying()
+        audioRecord?.release()
+        audioTrack?.release()
+    }
+}
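Note: the new PcmAudioWithAecManager wires an AudioRecord (VOICE_COMMUNICATION source, so the platform echo-cancellation path applies) and an AudioTrack around the existing VoiceController. A hedged usage sketch, using the VoiceController constructor shown earlier in this diff (the wiring and log messages are illustrative, not part of the commit):

// Usage sketch: mic frames flow into VoiceController.acceptAudio(),
// backend Base64 PCM can be queued via playBackendAudio().
fun startVoicePipeline(context: android.content.Context) {
    lateinit var audioManager: PcmAudioWithAecManager

    val controller = VoiceController(
        assetManager = context.assets,
        onWakeup = { println("wake word detected") },
        onFinalAudio = { audio -> println("utterance ready, ${audio.size} samples") },
        stopBackendAudio = { audioManager.stopPlaying() }
    )

    audioManager = PcmAudioWithAecManager(controller)
    audioManager.startRecording()
    audioManager.startPlaying()
}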
@@ -25,14 +25,14 @@ object SerialNumberUtil {
      * Called externally to obtain the final serial number
      */
     fun getSerialNumber(): String {
-//        for (key in snKeys) {
-//            val sn = getProp(key)
-//            if (!sn.isNullOrBlank()) {
-//                return limitSerialDigit(sn)
-//            }
-//        }
-//        return ""
-        return "zd09312051870556"
+        for (key in snKeys) {
+            val sn = getProp(key)
+            if (!sn.isNullOrBlank()) {
+                return limitSerialDigit(sn)
+            }
+        }
+        return ""
+//        return "zd09312051870556"
     }

     /**
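Note: getSerialNumber() now reads the device serial from a list of system-property keys via getProp, which is not included in this diff. One common way to implement such a helper is to shell out to the getprop binary; the sketch below assumes that approach and is purely illustrative:

// Hedged sketch of a getProp-style helper (the real helper is not shown in this commit).
// Reads an Android system property by running the `getprop` binary.
private fun getProp(key: String): String? = try {
    Runtime.getRuntime()
        .exec(arrayOf("getprop", key))
        .inputStream
        .bufferedReader()
        .use { it.readLine()?.trim() }
        ?.takeIf { it.isNotBlank() }
} catch (e: Exception) {
    null
}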
BIN	app/src/main/res/drawable/icon_people_speaking.webp	(new file, 1.9 MiB)
Binary file not shown.
@@ -31,5 +31,12 @@
         app:layout_constraintTop_toTopOf="parent"/>

+
+    <com.shuyu.gsyvideoplayer.video.StandardGSYVideoPlayer
+        android:id="@+id/standardGSYVideoPlayer"
+        android:layout_width="0dp"
+        android:layout_height="0dp"
+        app:layout_constraintBottom_toBottomOf="parent"
+        app:layout_constraintEnd_toEndOf="parent"/>
+
+
 </androidx.constraintlayout.widget.ConstraintLayout>
@@ -8,7 +8,7 @@ dependencies {

 android {
     namespace "com.unity3d.player"
-    ndkPath "D:\\Android\\sdk\\ndk\\29.0.13846066"
+    ndkPath "D:\\Android\\sdk\\ndk\\27.0.12077973"
     compileSdkVersion 35
     buildToolsVersion '34.0.0'

Binary files not shown (4 files).