feat: implement real-time recording pipeline specs and audio UI updates
This commit is contained in:
parent
9857498632
commit
f8e66f3057
@ -1557,23 +1557,38 @@ xray.module5: track1(数组,每项含node/action/strategy/purpose), track2(数
|
||||
db = SessionDB()
|
||||
with db._lock:
|
||||
cursor = db._conn.execute(
|
||||
"SELECT client_id, name, phone, created_at, updated_at FROM deepview_clients WHERE user_id=? ORDER BY updated_at DESC",
|
||||
"SELECT c.client_id, c.name, c.phone, c.created_at, c.updated_at, p.profile_json "
|
||||
"FROM deepview_clients c "
|
||||
"LEFT JOIN deepview_client_profiles p ON c.client_id = p.client_id "
|
||||
"WHERE c.user_id=? ORDER BY c.updated_at DESC",
|
||||
(user["userId"],)
|
||||
)
|
||||
rows = cursor.fetchall()
|
||||
|
||||
clients = []
|
||||
for row in rows:
|
||||
c_id, c_name, c_phone, c_created, c_updated = row
|
||||
c_id, c_name, c_phone, c_created, c_updated, p_json = row
|
||||
|
||||
# Option 1: To get report count, we could do SQL count or just skip it for now.
|
||||
# Here we just map directly.
|
||||
ltcStatus = None
|
||||
if p_json:
|
||||
try:
|
||||
profile = json.loads(p_json)
|
||||
topics = profile.get("nextVisitBrief", {}).get("topicsToPrepare", [])
|
||||
risks = profile.get("nextVisitBrief", {}).get("keyRisks", [])
|
||||
ltcStatus = {
|
||||
"topics": topics,
|
||||
"risks": risks
|
||||
}
|
||||
except:
|
||||
pass
|
||||
|
||||
clients.append({
|
||||
"id": c_id,
|
||||
"name": c_name,
|
||||
"phone": c_phone,
|
||||
"createdAt": c_created,
|
||||
"updatedAt": c_updated
|
||||
"updatedAt": c_updated,
|
||||
"ltcStatus": ltcStatus
|
||||
})
|
||||
|
||||
return web.json_response({"clients": clients}, headers=_CORS_HEADERS)
|
||||
@ -1937,6 +1952,17 @@ xray.module5: track1(数组,每项含node/action/strategy/purpose), track2(数
|
||||
# Persist to DB
|
||||
from hermes_state import SessionDB
|
||||
profileDb = SessionDB()
|
||||
|
||||
# 绝对真理:客户姓名是确定性数据,无论 AI 填了什么都需要以人工录入的数据库记录为准
|
||||
try:
|
||||
with profileDb._lock:
|
||||
name_cursor = profileDb._conn.execute("SELECT name FROM deepview_clients WHERE client_id=?", (clientId,))
|
||||
name_row = name_cursor.fetchone()
|
||||
if name_row and name_row[0]:
|
||||
parsedProfile["clientName"] = name_row[0]
|
||||
except Exception as e:
|
||||
logger.warning(f"[DeepviewSSE] Failed to lookup client name for forceful overwrite: {e}")
|
||||
|
||||
profileJson = json.dumps(parsedProfile, ensure_ascii=False)
|
||||
with profileDb._lock:
|
||||
profileDb._conn.execute(
|
||||
@ -1987,6 +2013,14 @@ xray.module5: track1(数组,每项含node/action/strategy/purpose), track2(数
|
||||
return web.json_response({"error": "No profile report yet", "clientId": clientId}, status=404, headers=_CORS_HEADERS)
|
||||
|
||||
profileData = json.loads(row[0])
|
||||
|
||||
# 绝对真理:每次读取时,强制用数据库当前的真实姓名覆盖 JSON(防止改名不同步或 AI 幻觉)
|
||||
with db._lock:
|
||||
name_cursor = db._conn.execute("SELECT name FROM deepview_clients WHERE client_id=?", (clientId,))
|
||||
name_row = name_cursor.fetchone()
|
||||
if name_row and name_row[0]:
|
||||
profileData["clientName"] = name_row[0]
|
||||
|
||||
safeBody = json.dumps({"success": True, "data": profileData, "generatedAt": row[1]}, ensure_ascii=False)
|
||||
return web.Response(text=safeBody, content_type="application/json", headers=_CORS_HEADERS)
|
||||
except Exception as e:
|
||||
|
||||
@ -43,9 +43,7 @@ description: 深维面诊智能军师
|
||||
1. **L0 是唯一的终极证据源**。所有核心论断必须能追溯到 L0 文件中的原文。
|
||||
2. **L1 可用于快速掌握历史**。但引用原声时,必须回溯 L0 文件验证后才可引用。
|
||||
3. **L2 仅供参考**。如果 L2(profile.md)中的结论与 L0 原文冲突,以 L0 为准。L2 是你上一次的工作产出,不是事实来源。
|
||||
4. **引用格式**:所有洞察标注格式为 `(来源: {文件名} L{级别}, {行号}行)`。
|
||||
- ✅ `(来源: rep_bc439351/asr.md L0, 125行)`
|
||||
- ❌ `(来源: profile.md)` ← 禁止引用 L2 作为唯一证据
|
||||
5. **内化证据原则**:你在梳理和推理时必须基于 L0 的坚实证据,但最后**在输出面向使用者的报告正文时,请保持自然流畅的业务陈述,不要机械地插入类似 `(来源: asr.md 行号)`的引用标记**。证据应无缝融合在你的洞察描述中,以降低使用者的阅读负担,并确保你的注意力集中在提炼有价值的结论上。
|
||||
|
||||
### 规模化阅读策略(录音数 > 5 条时)
|
||||
- **先读 L1**(report_draft.md)快速掌握每条录音的核心结论
|
||||
|
||||
@ -10,13 +10,13 @@
|
||||
3. **参考但不依赖** 已有的 `profile.md`(L2),它是你上一轮的产出,不是事实来源
|
||||
|
||||
## 输出必须包含的章节(缺一不可)
|
||||
1. 客户画像(审美底线、痛感耐受度、决策风格)——每条必须引用 L0 原始录音文件名和大致行号作为证据
|
||||
2. 信任轨迹:已信任项目列表(含 L0 证据)+ 被拒项目列表(含拒绝次数、原声引用、AI拒因分析)
|
||||
1. 客户画像(审美底线、痛感耐受度、决策风格)
|
||||
2. 信任轨迹:已信任项目列表(关键点提取)+ 被拒项目列表(被拒原因、AI拒因洞察)
|
||||
3. 面诊统计汇总:基于所有录音的平均信任指数趋势、核心诉求、最近一次接纳度
|
||||
4. 下次面诊准备要点:客户遗留的未解决问题、曾主动询问但未成交的项目
|
||||
|
||||
## 🚨 绝对禁止
|
||||
- 禁止编造任何"话术"(破冰话术、价值转化话术等)——这不是你的工作
|
||||
- 禁止编造 CRM 数据(会员等级、LTV、NPS 评分等)——你没有这些数据源
|
||||
- 所有洞察必须附带录音来源引用(格式:`来源: {文件名} L{级别}, {行号}行`),找不到 L0 证据的字段留空
|
||||
- 结论必须以 L0 证据为内核,但在生成的正文中保持行文流畅,禁止插入类似 `(来源: asr.md 行号)` 的格式标记
|
||||
- 禁止引用 profile.md (L2) 作为唯一证据——L2 是你自己的上一轮产出
|
||||
|
||||
162
docs/SPEC_realtime_recording_pipeline_v1.md
Normal file
162
docs/SPEC_realtime_recording_pipeline_v1.md
Normal file
@ -0,0 +1,162 @@
|
||||
# SPEC: 实时录音管线 — 复用清单与统一管线设计 (V1)
|
||||
|
||||
## 0. 核心设计理念
|
||||
|
||||
**"实时录音"就是"前端帮用户自动生成了一个音频文件,然后上传"。**
|
||||
|
||||
已有管线(`runRealPipeline` → OSS → `confirmUpload` → `_extract_audio_asr` → `asr.md`)已经是一条完整的从"音频文件"到"Markdown 落盘"的闭环链路。
|
||||
实时录音功能的目标不是另造一条管线,而是**为这条管线补一个新的文件来源入口**。
|
||||
|
||||
---
|
||||
|
||||
## 1. 从 MindOS 复用什么
|
||||
|
||||
### 1.1 可直接迁移代码(Android 保活三件套)
|
||||
|
||||
MindOS `mindOSv2/frontend/src-tauri/` 中以下文件可以**几乎原样迁移**到 Deepview 的 Tauri 工程中:
|
||||
|
||||
| 源文件(mindOSv2 路径) | 职责 | 迁移改动量 |
|
||||
|---|---|---|
|
||||
| `gen/android/.../RecordingForegroundService.kt` | 前台服务保活 + WakeLock(4h 硬超时) | **零修改** — 职责完全对等 |
|
||||
| `gen/android/.../RecordingServicePlugin.kt` | Tauri Plugin 桥:`start_service` / `stop_service` | **零修改** — 接口只有两个 |
|
||||
| `gen/android/AndroidManifest.xml`(权限片段) | `FOREGROUND_SERVICE_MICROPHONE` (Android 14+) 等完整权限声明 | **合并声明** — 新 manifest 追加 5 行 |
|
||||
|
||||
### 1.2 可借鉴但需裁剪的模块
|
||||
|
||||
| 模块 | MindOS 实现 | Deepview 裁剪方案 |
|
||||
|---|---|---|
|
||||
| **V4 Audio Capture(Rust cpal)** | 实时 PCM → WebSocket → 云端 ASR(实时流式) | ❌ **不复用**。Deepview 场景是面诊录完就分析,无需实时流。改用 Android `MediaRecorder` 录制本地 `.m4a` 文件即可,极大简化。 |
|
||||
| **WsAudioSender + Resampler** | 混音 + 重采样 + tungstenite WS 推送 | ❌ **不复用**。无系统音频混录需求、无实时转写需求。 |
|
||||
| **Adapter Pattern(PlatformService)** | 前端 `isTauri ? TauriAdapter : WebAdapter` | ✅ **复用模式**。Deepview 前端需相同切换:Web 浏览器走 `<input file>` 上传;Tauri Android 走 `invoke()` 本地录音。 |
|
||||
|
||||
### 1.3 绝对不复用的部分
|
||||
|
||||
- **Rust cpal 音频捕获引擎**:MindOS 是实时会议转写(需要毫秒级推流),Deepview 是事后分析(只需完整录音文件),架构层面毫不相干。
|
||||
- **System Audio Capture(macOS SCK)**:Deepview 只录麦克风,无系统拾音需求。
|
||||
|
||||
> **结论**:从 MindOS 拿走的是 **"保活框架"(3 个文件)** 和 **"平台抽象模式"**。音频采集本身重写为极简的 `MediaRecorder` 方案。
|
||||
|
||||
---
|
||||
|
||||
## 2. 在 Deepview 复用什么
|
||||
|
||||
### 2.1 已跑通的后端管线(100% 复用,零修改)
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
A["前端: File 对象"] --> B["api.getUploadToken()"]
|
||||
B --> C["api.uploadToOSS(putUrl, file)"]
|
||||
C --> D["api.confirmUpload(ossKey, filename, contextId)"]
|
||||
D --> E["deepview_materials.py\n_extract_audio_asr()"]
|
||||
E --> F["DashScope Paraformer V2\n说话人分离 ASR"]
|
||||
F --> G["asr.md 物理落盘"]
|
||||
G --> H["report/generate\n→ report_draft.md"]
|
||||
H --> I["归档 → profile.md 更新"]
|
||||
```
|
||||
|
||||
整条链路中,唯一的入口变量是 **`File` 对象从哪来**:
|
||||
- **当前情况(长按上传)**:`<input type="file">` → `File` → `runRealPipeline(file)`
|
||||
- **新增情况(短按录音)**:`MediaRecorder.stop()` → `Blob` → `new File([blob])` → `runRealPipeline(file)`
|
||||
|
||||
**后端不需要任何修改。** ASR 处理函数 `_extract_audio_asr` 已支持 `.m4a` / `.mp3` / `.wav` 扩展名。
|
||||
|
||||
### 2.2 已跑通的前端管线骨架(复用 `app.ts` 已有逻辑)
|
||||
|
||||
当前 `app.ts` 中已有完整的管线调度:
|
||||
|
||||
| 已有代码 | 所在位置 | 作用与是否修改 |
|
||||
|---|---|---|
|
||||
| `runRealPipeline(file: File)` | `app.ts:391` | **复用** — Upload → Confirm → Navigate 的统一入口 |
|
||||
| `handleFileUpload(event)` | `app.ts:378` | **复用** — 长按触发文件选择 |
|
||||
| `startRecording()` / `stopAndProcess()` | `app.ts:356/365` | **修改** — 目前 `startRecording` 只启动了计时器和 Mock UI,需真正接入 `MediaRecorder` |
|
||||
| `runAgentPipeline()` | `app.ts:422` | **删除** — 这是 mock 占位逻辑,将被实际录音替代 |
|
||||
|
||||
---
|
||||
|
||||
## 3. 平台分级与统一管线保证
|
||||
|
||||
### 3.1 两种运行环境的能力矩阵
|
||||
|
||||
| 能力 | Web 浏览器(当前) | Tauri + Android(目标) |
|
||||
|---|---|---|
|
||||
| 文件选择上传 | ✅ `<input type="file">` | ✅ 同上(WebView 内置) |
|
||||
| 前台录音 | ✅ `navigator.mediaDevices.getUserMedia` + `MediaRecorder` | ✅ 同上(WebView 支持,且屏幕常亮无干扰) |
|
||||
| 后台录音保活 | ❌ **必死**(浏览器退到后台会被 throttle/kill) | ✅ `RecordingForegroundService` + WakeLock |
|
||||
| 高优先级通知栏 | ❌ 不适用 | ✅ `startForeground + FOREGROUND_SERVICE_TYPE_MICROPHONE` |
|
||||
|
||||
### 3.2 统一管线保证:单一 File → Pipeline 入口
|
||||
|
||||
```
|
||||
┌──────────────────────────────────┐
|
||||
│ 前端 (Angular 20) │
|
||||
│ │
|
||||
│ 来源 A: input[type=file] │──┐
|
||||
│ 来源 B: MediaRecorder.stop() │──┤ 统一产出: File 对象
|
||||
│ 来源 C*: Tauri invoke 回调 │──┘
|
||||
│ │
|
||||
│ ↓ 全部汇入 ↓ │
|
||||
│ runRealPipeline(file: File) │ ← 唯一管线入口
|
||||
│ ↓ │
|
||||
│ getUploadToken → uploadToOSS │
|
||||
│ → confirmUpload(ossKey, ...) │
|
||||
└──────────┬───────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────┐
|
||||
│ 后端 (Python Gateway) │
|
||||
│ │
|
||||
│ _handleMaterialsConfirm │
|
||||
│ ↓ │
|
||||
│ deepview_materials.py │
|
||||
│ _extract_audio_asr(raw, md) │
|
||||
│ ↓ │
|
||||
│ DashScope Paraformer V2 ASR │
|
||||
│ ↓ │
|
||||
│ asr.md 物理落盘 │
|
||||
│ ↓ │
|
||||
│ → report/generate → Markdown-First Pipeline
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
**关键约束:无论音频从什么来源产生,到达 `runRealPipeline` 时都必须是一个标准的 `File` 对象。后端永远只面对一个 OSS key、一个 filename、一个 contextId。** 这确保了:
|
||||
- Web PC 端手动选文件 → 同一条管线
|
||||
- Web 手机端前台录音 → 同一条管线
|
||||
- Tauri Android 后台录音 → 同一条管线
|
||||
|
||||
---
|
||||
|
||||
## 4. 分阶段交付计划
|
||||
|
||||
### Phase 1: Web 前台录音(无 Tauri 依赖)
|
||||
**目标**:让现有的"短按面诊"按钮真正可用,替代当前的 Mock 空转。
|
||||
|
||||
改动范围仅限 `app.ts` :
|
||||
1. `startRecording()` 内调用 `navigator.mediaDevices.getUserMedia` + `new MediaRecorder`
|
||||
2. `stopAndProcess()` 内拿到 `Blob` → `new File([blob], 'recording_xxx.m4a')` → `this.runRealPipeline(file)`
|
||||
3. 删除 `runAgentPipeline()` 这个 mock 函数
|
||||
|
||||
**后端改动**:无。
|
||||
**预期效果**:用户在 PC/手机浏览器打开 Deepview,短按开始录音,再按停止,Mixin 面板自动弹起加载骨架屏,2~3 分钟后即可查看 X 光片报告。
|
||||
|
||||
### Phase 2: Tauri Android 壳化 + 后台保活
|
||||
**目标**:将当前 Angular SPA 包裹进 Tauri 2 的 Android WebView 壳中,启用后台录音。
|
||||
|
||||
改动范围:
|
||||
1. 初始化 Tauri Android 工程:`cargo tauri android init`
|
||||
2. 从 MindOS 迁移三件套:`RecordingForegroundService.kt` / `RecordingServicePlugin.kt` / Manifest 权限
|
||||
3. 前端增加 `PlatformService` 感知 `isTauri`
|
||||
4. 录音逻辑走分支:
|
||||
- `isTauri = false`(Phase 1 逻辑)→ `MediaRecorder`
|
||||
- `isTauri = true` → `invoke('plugin:recording-service|start_service')` → 原生录音 → `invoke('plugin:recording-service|stop_service')` 回调文件路径 → `new File(...)` → `runRealPipeline(file)`
|
||||
|
||||
**后端改动**:无。
|
||||
|
||||
---
|
||||
|
||||
## 5. 风险与预案
|
||||
|
||||
| 风险 | 级别 | 预案 |
|
||||
|---|---|---|
|
||||
| Web `MediaRecorder` 在部分 Android 浏览器 codec 不标准 | 中 | 强制 `mimeType: 'audio/webm;codecs=opus'`,后端 ASR 引擎已支持 webm |
|
||||
| Tauri Android 构建时 Kotlin 版本冲突 | 低 | MindOS 已验证 Tauri 2.x + Kotlin 1.9.x 兼容性 |
|
||||
| 录音超过 4 小时 WakeLock 自释放 | 极低 | 面诊通常 30 分钟内,4h 上限绰绰有余。且前端 UI 有计时器显示 |
|
||||
| DashScope ASR 对 webm 格式兼容性 | 中 | 如不兼容,在 `_extract_audio_asr` 中追加 ffmpeg 转码为 wav |
|
||||
@ -88,37 +88,46 @@
|
||||
<!-- STATE 1: Actively Recording (only when showActionSheet is true) -->
|
||||
<ng-container *ngIf="showActionSheet">
|
||||
<div class="recording-state" *ngIf="isRecording">
|
||||
<div class="rec-timer">
|
||||
<div class="red-dot animate-pulse"></div>
|
||||
{{ formatDuration(recordDuration) }}
|
||||
<div class="rec-timer" style="display: flex; flex-direction: column; align-items: center; justify-content: center; gap: 4px;">
|
||||
<div style="font-size: 40px; font-weight: bold; display: flex; align-items: center; justify-content: center; gap: 8px; color: var(--color-slate-800);">
|
||||
<div class="red-dot animate-pulse" *ngIf="!isRecordingPaused" style="width: 12px; height: 12px; background-color: var(--color-primary); border-color: var(--color-primary); box-shadow: 0 0 0 0 rgba(var(--color-primary-rgb, 59, 130, 246), 0.7);"></div>
|
||||
<div class="red-dot" *ngIf="isRecordingPaused" style="width: 12px; height: 12px; background-color: var(--color-slate-400); border-color: var(--color-slate-400); animation: none; box-shadow: none;"></div>
|
||||
<span>{{ formatDuration(recordDuration) }}</span>
|
||||
</div>
|
||||
<p class="rec-subtitle text-center text-secondary" style="font-size: 14px; margin: 0; color: var(--color-slate-500);">
|
||||
{{ isRecordingPaused ? '录音已暂停' : '正在实时接听面诊中...' }}
|
||||
</p>
|
||||
</div>
|
||||
<p class="rec-subtitle">深维云脑:正在实时接听面诊中...</p>
|
||||
|
||||
<button class="stop-btn" (click)="stopAndProcess()">
|
||||
<svg width="20" height="20" viewBox="0 0 24 24" fill="currentColor"><rect x="6" y="6" width="12" height="12" rx="2" ry="2"></rect></svg>
|
||||
结束对谈 · 生成绝杀 X光片
|
||||
</button>
|
||||
</div>
|
||||
<!-- STATE 2: Processing (The Relay Bar) -->
|
||||
<div class="processing-state" *ngIf="isProcessing">
|
||||
<h3 class="process-title" *ngIf="!uploadComplete">🧠 深维专家团并行推理中...</h3>
|
||||
<h3 class="process-title text-success" *ngIf="uploadComplete">✅ X光片就绪</h3>
|
||||
|
||||
<div class="relay-nodes">
|
||||
@for (agent of agents; track agent.id; let i = $index) {
|
||||
<div class="agent-node" [class.active]="agent.status === 'processing'" [class.done]="agent.status === 'completed'">
|
||||
<div class="node-icon-wrapper">
|
||||
<span class="node-icon" [innerHTML]="agent.iconHtml"></span>
|
||||
<div class="pulse-ring" *ngIf="agent.status === 'processing'"></div>
|
||||
</div>
|
||||
<span class="node-name">{{ agent.name }}</span>
|
||||
<div class="connector" *ngIf="i < agents.length - 1" [class.active]="agent.status === 'completed'"></div>
|
||||
<!-- Reusing Pill Navigation layout for recording controls -->
|
||||
<div class="pill-nav-container" style="position: relative; margin-top: 40px; padding: 0; background: none; box-shadow: none; pointer-events: auto;">
|
||||
<div class="pill-nav" style="margin: 0; width: 100%; justify-content: space-between;">
|
||||
|
||||
<!-- Left: Pause/Resume -->
|
||||
<a class="pill-item" style="cursor: pointer; width: 33%; color: #475569;" (click)="togglePauseResume()">
|
||||
<svg *ngIf="!isRecordingPaused" width="22" height="22" viewBox="0 0 24 24" fill="currentColor" stroke="none"><rect x="6" y="4" width="4" height="16" rx="1"></rect><rect x="14" y="4" width="4" height="16" rx="1"></rect></svg>
|
||||
<svg *ngIf="isRecordingPaused" width="22" height="22" viewBox="0 0 24 24" fill="var(--color-primary)" stroke="none"><polygon points="5 3 19 12 5 21 5 3"></polygon></svg>
|
||||
<span *ngIf="!isRecordingPaused">暂停对谈</span>
|
||||
<span *ngIf="isRecordingPaused" style="color: var(--color-primary); font-weight: 500;">继续录音</span>
|
||||
</a>
|
||||
|
||||
<!-- Center: Recording Indicator -->
|
||||
<div class="pill-action">
|
||||
<button class="mic-bubble" style="background-color: var(--color-primary); box-shadow: 0 8px 16px rgba(59, 130, 246, 0.3); cursor: default;">
|
||||
<div class="red-dot" [class.animate-pulse]="!isRecordingPaused" style="background-color: white; border-color: white; width: 12px; height: 12px; margin-right: 0; box-shadow: none;"></div>
|
||||
</button>
|
||||
</div>
|
||||
}
|
||||
|
||||
<!-- Right: Stop -->
|
||||
<a class="pill-item" style="cursor: pointer; width: 33%;" (click)="stopAndProcess()">
|
||||
<svg width="22" height="22" viewBox="0 0 24 24" fill="var(--color-primary)" stroke="none"><rect x="6" y="6" width="12" height="12" rx="2" ry="2"></rect></svg>
|
||||
<span style="color: var(--color-primary); font-weight: 500;">结束生成</span>
|
||||
</a>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p class="cost-hint text-center mt-2" *ngIf="!uploadComplete">预估消耗 1000 算力点数</p>
|
||||
</div>
|
||||
|
||||
</ng-container>
|
||||
|
||||
<!-- STATE 3: Chat Sheet -->
|
||||
|
||||
@ -49,6 +49,7 @@ export class App implements OnDestroy, OnInit {
|
||||
// Recording & Upload Modal State
|
||||
showActionSheet = false;
|
||||
isRecording = false;
|
||||
isRecordingPaused = false;
|
||||
isProcessing = false;
|
||||
uploadComplete = false;
|
||||
|
||||
@ -356,17 +357,30 @@ export class App implements OnDestroy, OnInit {
|
||||
startRecording() {
|
||||
this.showActionSheet = true;
|
||||
this.isRecording = true;
|
||||
this.isRecordingPaused = false;
|
||||
this.isProcessing = false;
|
||||
this.uploadComplete = false;
|
||||
this.recordDuration = 0;
|
||||
this.recordingTimer = setInterval(() => { this.recordDuration++; }, 1000);
|
||||
this.recordingTimer = setInterval(() => {
|
||||
if (!this.isRecordingPaused) {
|
||||
this.recordDuration++;
|
||||
}
|
||||
}, 1000);
|
||||
}
|
||||
|
||||
stopAndProcess() {
|
||||
// Inbox SPEC: 不再需要客户校验,直接上云
|
||||
this.isRecording = false;
|
||||
this.isRecordingPaused = false;
|
||||
if (this.recordingTimer) clearInterval(this.recordingTimer);
|
||||
this.runAgentPipeline();
|
||||
|
||||
// The native audio blob hasn't been bridged yet (Tauri V4 audio pipeline pending)
|
||||
// For now, close the sheet. (Users can long-press to test the real file upload pipeline).
|
||||
this.showActionSheet = false;
|
||||
alert('Tauri 原生录音桥接尚未完成。如需体验真实推理管线,请长按录音键使用本地录音文件上传。');
|
||||
}
|
||||
|
||||
togglePauseResume() {
|
||||
this.isRecordingPaused = !this.isRecordingPaused;
|
||||
}
|
||||
|
||||
triggerUploadFile() {
|
||||
@ -419,36 +433,9 @@ export class App implements OnDestroy, OnInit {
|
||||
}
|
||||
|
||||
// --- Mock Pipeline for Recording ---
|
||||
runAgentPipeline() {
|
||||
this.isProcessing = true;
|
||||
this.agents.forEach(a => a.status = 'pending');
|
||||
this.agents[0].status = 'processing';
|
||||
|
||||
const processNext = (index: number) => {
|
||||
this.agents[index].status = 'processing';
|
||||
setTimeout(() => {
|
||||
if (index === this.agents.length - 1) {
|
||||
this.agents[index].status = 'completed';
|
||||
this.uploadComplete = true;
|
||||
// Auto close after 2.5 seconds
|
||||
setTimeout(() => { this.showActionSheet = false; }, 2500);
|
||||
} else {
|
||||
this.agents[index].status = 'completed';
|
||||
processNext(index + 1);
|
||||
}
|
||||
}, 800);
|
||||
};
|
||||
|
||||
setTimeout(() => {
|
||||
this.agents[0].status = 'completed';
|
||||
processNext(1);
|
||||
}, 800);
|
||||
}
|
||||
|
||||
closeSheets() {
|
||||
if (!this.isRecording && !this.isProcessing) {
|
||||
this.showActionSheet = false;
|
||||
}
|
||||
this.showActionSheet = false;
|
||||
this.showChatSheet = false;
|
||||
}
|
||||
|
||||
|
||||
@ -28,20 +28,23 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="client-body">
|
||||
<div class="demand-row">
|
||||
<strong>核心诉求:</strong>{{ client.project || '待AI挖掘/人工补全' }}
|
||||
</div>
|
||||
|
||||
<div class="barrier-box" *ngIf="client.ltcStatus && client.ltcStatus?.stage !== 'won'">
|
||||
<div class="barrier-title">⚠️ AI 诊断阻力</div>
|
||||
<div class="barrier-text">{{ client.ltcStatus?.coreBarrier }}</div>
|
||||
</div>
|
||||
<div class="client-body" style="padding-top: 8px;">
|
||||
<!-- Active State: Has Topics -->
|
||||
<ng-container *ngIf="client.ltcStatus?.topics?.length">
|
||||
<div class="action-box" style="background-color: rgba(39, 174, 96, 0.05); border: 1px solid rgba(39, 174, 96, 0.2); border-radius: 8px; padding: 12px;">
|
||||
<div class="action-title" style="color: #27ae60; font-size: 13px; font-weight: 600; margin-bottom: 6px;">💡 可探索的话题(客户曾主动询问)</div>
|
||||
<ul class="action-text vital-list" style="margin: 0; padding-left: 18px; font-size: 13px; color: var(--color-slate-700); line-height: 1.6;">
|
||||
<li *ngFor="let topic of client.ltcStatus.topics" style="margin-bottom: 4px;">{{ topic }}</li>
|
||||
</ul>
|
||||
</div>
|
||||
</ng-container>
|
||||
|
||||
<div class="action-box" *ngIf="client.ltcStatus">
|
||||
<div class="action-title">💡 下一步建议</div>
|
||||
<div class="action-text">{{ client.ltcStatus?.nextAction }}</div>
|
||||
</div>
|
||||
<!-- Fallback State: No Topics -->
|
||||
<ng-container *ngIf="!client.ltcStatus?.topics?.length">
|
||||
<div class="demand-row">
|
||||
<strong>分析状态:</strong>等待归档面诊录音生成档案...
|
||||
</div>
|
||||
</ng-container>
|
||||
</div>
|
||||
|
||||
<div class="client-footer">
|
||||
|
||||
Loading…
Reference in New Issue
Block a user