jymh committed
Commit 9fca710 · Parent: 9b71d73

first commit

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +126 -0
  3. librispeech/dev-clean_filter.json +3 -0
  4. librispeech/dev-clean_filtered_asr.json +3 -0
  5. librispeech/dev-clean_pc.json +3 -0
  6. librispeech/dev-other_filter.json +3 -0
  7. librispeech/dev-other_filtered_asr.json +3 -0
  8. librispeech/dev-other_pc.json +3 -0
  9. librispeech/test-clean_1000_filter.json +3 -0
  10. librispeech/test-clean_1000_filtered_asr.json +3 -0
  11. librispeech/test-clean_1000_pc.json +3 -0
  12. librispeech/test-clean_100_filter.json +3 -0
  13. librispeech/test-clean_100_filtered_asr.json +3 -0
  14. librispeech/test-clean_100_pc.json +3 -0
  15. librispeech/test-clean_2000_filter.json +3 -0
  16. librispeech/test-clean_2000_filtered_asr.json +3 -0
  17. librispeech/test-clean_2000_pc.json +3 -0
  18. librispeech/test-clean_500_filter.json +3 -0
  19. librispeech/test-clean_500_filtered_asr.json +3 -0
  20. librispeech/test-clean_500_pc.json +3 -0
  21. librispeech/test-other_1000_filter.json +3 -0
  22. librispeech/test-other_1000_filtered_asr.json +3 -0
  23. librispeech/test-other_1000_pc.json +3 -0
  24. librispeech/test-other_100_filter.json +3 -0
  25. librispeech/test-other_100_filtered_asr.json +3 -0
  26. librispeech/test-other_100_pc.json +3 -0
  27. librispeech/test-other_2000_filter.json +3 -0
  28. librispeech/test-other_2000_filtered_asr.json +3 -0
  29. librispeech/test-other_2000_pc.json +3 -0
  30. librispeech/test-other_500_filter.json +3 -0
  31. librispeech/test-other_500_filtered_asr.json +3 -0
  32. librispeech/test-other_500_pc.json +3 -0
  33. librispeech/train-clean-460_filter.json +3 -0
  34. librispeech/train-clean-460_filtered_asr.json +3 -0
  35. librispeech/train-clean-460_pc.json +3 -0
  36. librispeech/train-other-500_filter.json +3 -0
  37. librispeech/train-other-500_filtered_asr.json +3 -0
  38. librispeech/train-other-500_pc.json +3 -0
  39. slidespeech/slidespeech_L95/dev.json +3 -0
  40. slidespeech/slidespeech_L95/test.json +3 -0
  41. slidespeech/slidespeech_L95/train.json +3 -0
  42. slidespeech/slidespeech_L95_5slides/dev.json +3 -0
  43. slidespeech/slidespeech_L95_5slides/test.json +3 -0
  44. slidespeech/slidespeech_L95_5slides/train.json +3 -0
  45. slidespeech/slidespeech_L95_5slides_filter/S95.json +3 -0
  46. slidespeech/slidespeech_L95_5slides_filter/dev.json +3 -0
  47. slidespeech/slidespeech_L95_5slides_filter/test.json +3 -0
  48. slidespeech/slidespeech_L95_5slides_filter/train.json +3 -0
  49. slidespeech/slidespeech_L95_5slides_filtered_train/S95.json +3 -0
  50. slidespeech/slidespeech_L95_5slides_filtered_train/count_keywords_after_filter_251011.py +146 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,129 @@
+ # SAP²-ASR Dataset
+
+ This dataset supports research on contextualized automatic speech recognition (ASR) with the **SAP² (Speech-Aware Long Context Pruning and Integration)** method.
+
+ ## 📖 Overview
+
+ SAP² is a framework for context-aware ASR that dynamically prunes and integrates relevant contextual keywords. It tackles the challenge of exploiting long textual context in domain-specific scenarios such as slide presentations, where the large amount of OCR-derived text contains both relevant information and substantial noise.
+
+ ### Key Features
+
+ - **Speech-aware context pruning**: dynamically filters OCR-derived textual context, keeping only the keywords directly relevant to the spoken content
+ - **Cross-modal context compression**: uses Speech-Driven Attention-based Pooling to compress large textual inputs into compact, speech-relevant context embeddings (see the sketch below)
+ - **State-of-the-art performance**: 7.71% word error rate (WER) on SlideSpeech and 1.12% WER on LibriSpeech, with a 41.1% relative improvement in biased-keyword recognition over a non-contextual baseline
+
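+ The snippet below is a minimal, illustrative sketch of what speech-driven attention-based pooling can look like; it is not the official SAP² implementation (see the GitHub repository for that), and the module name, dimensions, and use of `nn.MultiheadAttention` are assumptions.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class SpeechDrivenPooling(nn.Module):
+     """Toy cross-attention pooling: speech frames query the keyword embeddings."""
+     def __init__(self, d_model: int = 256, n_heads: int = 4):
+         super().__init__()
+         self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
+
+     def forward(self, speech_emb, text_emb):
+         # speech_emb: (batch, T_frames, d_model) acoustic frame embeddings
+         # text_emb:   (batch, N_keywords, d_model) keyword/context embeddings
+         pooled, _ = self.attn(query=speech_emb, key=text_emb, value=text_emb)
+         return pooled.mean(dim=1)  # one compact context embedding per utterance
+
+ # Toy usage with random tensors
+ pool = SpeechDrivenPooling()
+ print(pool(torch.randn(2, 120, 256), torch.randn(2, 300, 256)).shape)  # torch.Size([2, 256])
+ ```
+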
+ ## 📊 Dataset Structure
+
+ The dataset contains two main subsets:
+
+ ### SlideSpeech
+
+ - **Source**: SlideSpeech is a large-scale audio-visual corpus enriched with slides, containing 1,705 videos and over 1,000 hours of audio, of which 473 hours are high-quality transcribed speech
+ - **Format**: JSON, with audio paths and conversation-style entries that carry contextual keywords
+ - **Directory layout**:
+   - `slidespeech_L95/`: original data (PC in the paper)
+   - `slidespeech_L95_filter/`: first-stage TPI training
+   - `slidespeech_L95_filtered_train`: second-stage TPI training
+   - `slidespeech_L95_5slides/`: 5-slide version
+   - `slidespeech_L95_multitask/`: multi-task version (JPI in the paper)
+
+ ### LibriSpeech
+
+ - **Source**: LibriSpeech is a large-scale corpus of read English speech derived from LibriVox audiobooks
+ - **Format**: JSON, with separate configurations for the training, validation, and test sets
+ - **Directory layout**:
+   - `train-clean-460_*.json`: training set (clean, 460 hours)
+   - `train-other-500_*.json`: training set (other, 500 hours)
+   - `dev-clean_*.json`, `dev-other_*.json`: validation sets
+   - `test-clean_*.json`, `test-other_*.json`: test sets (subsets of 100, 500, 1000, and 2000 utterances)
+
+ ### Data Format Example
+
+ ```json
+ {
+     "messages": [
+         {
+             "role": "user",
+             "content": "<audio>/path/to/audio.wav</audio>Transcribe speech to text according to keywords may appear in the utterance. Possible keywords are: <|startofcontext|>keyword1 keyword2 keyword3<|endofcontext|>"
+         },
+         {
+             "role": "assistant",
+             "content": "transcribed text"
+         }
+     ],
+     "audios": "/path/to/audio.wav"
+ }
+ ```
+
+ **Key tokens**:
+ - `<|startofcontext|>` and `<|endofcontext|>`: special tokens marking the span of contextual keywords (see the helper below)
+ - `<audio>...</audio>`: marks the audio file path
+
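+ The following helper is a small illustrative sketch for pulling the keywords out of one sample. It assumes the message layout shown above; the field names and special tokens come from the example, while the function name and the whitespace split are assumptions (some files may use other separators, e.g. the bundled counting script splits on ", ").
+
+ ```python
+ import re
+
+ def extract_keywords(sample: dict) -> list:
+     """Return the context keywords embedded in the user turn of one sample."""
+     user_turn = next(m["content"] for m in sample["messages"] if m["role"] == "user")
+     match = re.search(r"<\|startofcontext\|>(.*?)<\|endofcontext\|>", user_turn, re.DOTALL)
+     return match.group(1).split() if match else []
+ ```
+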
+ ## 🚀 Usage
+
+ ### Loading the Data
+
+ ```python
+ import json
+
+ # Load the SlideSpeech data
+ with open('slidespeech/slidespeech_L95_filter/train.json', 'r') as f:
+     slidespeech_train = json.load(f)
+
+ # Load the LibriSpeech data
+ with open('librispeech/train-clean-460_filter.json', 'r') as f:
+     librispeech_train = json.load(f)
+ ```
+
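+ Note that the JSON files in this repository are tracked with Git LFS (see the `.gitattributes` change in this commit), so they need to be fully materialized (for example via `git lfs pull` or the Hugging Face Hub client) before `json.load` will work. A minimal sketch using `huggingface_hub`; the repository id below is a placeholder:
+
+ ```python
+ from huggingface_hub import snapshot_download
+
+ # Replace the placeholder with the actual dataset id on the Hub.
+ local_dir = snapshot_download(repo_id="<user>/<dataset-name>", repo_type="dataset")
+ print(local_dir)
+ ```
+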
+ ### Using the Data with SAP²
+
+ For detailed usage instructions and the training and inference code, please refer to:
+
+ **🔗 [SAP²-ASR GitHub Repository](https://github.com/jymh/SAP2-ASR.git)**
+
+ The repository contains:
+ - the full model implementation
+ - training and inference scripts
+ - data preprocessing tools
+ - evaluation scripts
+ - detailed usage documentation
+
+ ## 📎 Citation
+
+ If you use this dataset in your research, please cite the following paper:
+
+ ```bibtex
+ @article{rong2025speechaware,
+   title={Speech-Aware Long Context Pruning and Integration for Contextualized Automatic Speech Recognition},
+   author={Rong, Yiming and Zhang, Yixin and Wang, Ziyi and Jiang, Deyang and Zhao, Yunlong and Wu, Haoran and Zhou, Shiyu and Xu, Bo},
+   journal={arXiv preprint arXiv:2511.11139},
+   year={2025}
+ }
+ ```
+
+ **Paper link**: [https://www.arxiv.org/abs/2511.11139](https://www.arxiv.org/abs/2511.11139)
+
+ ## 📚 Related Resources
+
+ - **Code repository**: [https://github.com/jymh/SAP2-ASR.git](https://github.com/jymh/SAP2-ASR.git)
+ - **Paper**: [arXiv:2511.11139](https://www.arxiv.org/abs/2511.11139)
+ - **Original SlideSpeech dataset**: [https://slidespeech.github.io/](https://slidespeech.github.io/)
+ - **Original LibriSpeech dataset**: [OpenSLR](https://www.openslr.org/12/)
+
+ ## 🏛 License
+
+ Use of this dataset must follow the license terms of the original datasets. For SlideSpeech and LibriSpeech, please refer to the license information on their original resource pages.
+
+ ## ⚠️ Notes
+
+ 1. Audio file paths may need to be adjusted to your local environment
+ 2. The dataset files are large; make sure you have enough storage space
+ 3. Please read the detailed documentation in the [GitHub repository](https://github.com/jymh/SAP2-ASR.git) before use
+
+ ---
+
+ **For more information and usage examples, please visit the [SAP²-ASR GitHub Repository](https://github.com/jymh/SAP2-ASR.git)**
+
  ---
  license: apache-2.0
  ---
librispeech/dev-clean_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:494079e66f5983e481a2423858292e68b7a271e9ba1a704182a0ab3c078860f0
+ size 16995084
librispeech/dev-clean_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:214bd3858a522b3411d9ba2a6a8e3492e155239bbb637a74ebc1936a63893e83
+ size 1361688
librispeech/dev-clean_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:569c4c5bc96b7a8bc1b3edcc51d7114f38c481e7d1e4d4bc6507e08e4c28af51
+ size 17284317
librispeech/dev-other_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:047450d5d1742788e9600fd1430378a24499c9f9ad51a953f49d1dd1d60c6b46
+ size 17947988
librispeech/dev-other_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67bc165d5ab132c65ae18d9d8ebbe6a5496fb4a3d4084e86bb49a756b80db013
+ size 1378646
librispeech/dev-other_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf629643a4d0b12969776917ddc013248c17025d170366c19bdfb0b7f34ac388
+ size 18225434
librispeech/test-clean_1000_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dc449ac48a83d8a5db2f435932416b7d99e198114571a249ec9c6986b304239
+ size 26661553
librispeech/test-clean_1000_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:868eb9e72ca917406659dd93b1fda09acec7b4f063a8e9c711547f8b7680b215
+ size 1322871
librispeech/test-clean_1000_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e056ed82f802a8cf61e6714de09caef1a78b68eda6349e6447e7a86aa1849de8
+ size 26941872
librispeech/test-clean_100_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aefeed8018ffe5fb6f8b40803537524e0bbe1bf444e8f93ecc2dbb9524c71a28
+ size 3649107
librispeech/test-clean_100_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:868eb9e72ca917406659dd93b1fda09acec7b4f063a8e9c711547f8b7680b215
+ size 1322871
librispeech/test-clean_100_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:066d66baf4f5f632f377147ad7c9e9c3c831202b4de1f0f2ff4ce18e1b156859
+ size 3929426
librispeech/test-clean_2000_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ae13a50bd8a21810aa3a5eda7dec09319b87c53b735777c3a5a395fb8e21861
+ size 52137181
librispeech/test-clean_2000_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:868eb9e72ca917406659dd93b1fda09acec7b4f063a8e9c711547f8b7680b215
+ size 1322871
librispeech/test-clean_2000_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a405223fa0156c25ab722ab10a08d11768c1b103192568ea838a031f67741064
+ size 52417500
librispeech/test-clean_500_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05bf5c581551335dcf0f1dfd4eff64f7ddc73c128f9ef9340cf27c78c4fae587
+ size 13884322
librispeech/test-clean_500_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:868eb9e72ca917406659dd93b1fda09acec7b4f063a8e9c711547f8b7680b215
+ size 1322871
librispeech/test-clean_500_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbfaf6cfaec254deecc8e25970430d7a7a070dff5ea57af05fc9aabdd738a59e
+ size 14164641
librispeech/test-other_1000_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f2fb2bd24bcf99f9fdfd6f43027055df124c2a03299b57493a1f3c3cb6bc5ee
+ size 29880756
librispeech/test-other_1000_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:186f155270c515fabcfe2a4e2204ac0e80964a000e6de80a39fa2e6cc9e959b3
+ size 1424509
librispeech/test-other_1000_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8796dc23c6cc4b154a265ffbde3061480eaf4ec74f2063852542344a4767edc
+ size 30164633
librispeech/test-other_100_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4719130b77ddbcf44732cf21a536430007d4cfebbb4b484742d5409a639b2f3
+ size 4071171
librispeech/test-other_100_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:186f155270c515fabcfe2a4e2204ac0e80964a000e6de80a39fa2e6cc9e959b3
+ size 1424509
librispeech/test-other_100_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c27aac655cfd574cad309e26cd9f155a1bbb6b01e64cc2d950b9a2cf37d76a5
+ size 4355048
librispeech/test-other_2000_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72dfb730e0167d794205ebe98077d39cf3969616a819021969192df7a68b5e73
+ size 58468062
librispeech/test-other_2000_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:186f155270c515fabcfe2a4e2204ac0e80964a000e6de80a39fa2e6cc9e959b3
+ size 1424509
librispeech/test-other_2000_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40c8545d72c7c00a7b61f1a5509910116113ef62d488468fdc0baf8028ed0a40
+ size 58751939
librispeech/test-other_500_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd7547955be409c296402b08666163d2fa32749afc5db9b6e77fc0054d3e251a
+ size 15552466
librispeech/test-other_500_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:186f155270c515fabcfe2a4e2204ac0e80964a000e6de80a39fa2e6cc9e959b3
+ size 1424509
librispeech/test-other_500_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bf0b30c6c20a654e6702a8ce96865b174dcb9b6f79c5bab266b7ef41df87644
+ size 15836343
librispeech/train-clean-460_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18f739fa3b818a717d89ae028b4cf13c40ad202e257a4e8e414452ea2bf490d5
+ size 836153907
librispeech/train-clean-460_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2582b8382240c4c280343b6e87f8b8046935aa3ad5a3a6295a9d865c4d8a2914
+ size 81066086
librispeech/train-clean-460_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b535f5e4aedd1a7697c42624e1c4a236796d02caa6c695f29d1e1ff41e584342
+ size 858998364
librispeech/train-other-500_filter.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:882a52fa224a9f38c51cdc130877d95a126236e91e22983c8f41e3af87a1a244
+ size 936464758
librispeech/train-other-500_filtered_asr.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed6d94d9cbb825af3ea86253d0b2c2eda618e1444bb9eda411c5954905fd7cd9
+ size 88132611
librispeech/train-other-500_pc.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ddd785c26d91699815157d45b94ba7f4b36d6d49ffb5b57d23286a133004197
+ size 960631559
slidespeech/slidespeech_L95/dev.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a0b71a9c7348bc9c497f933f1b5bd53f8e96231c7225be635c5462e5fb71815
+ size 1401937
slidespeech/slidespeech_L95/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf6ebe808b4877b618cb1c5fd2bd87bb36eec0f0525df80bd8ae234cbd7d558c
+ size 2487403
slidespeech/slidespeech_L95/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e35ec7372655356a14a2f6ab2b7ab7412169ce8abb1982014c86c371ec6e844b
+ size 307037255
slidespeech/slidespeech_L95_5slides/dev.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af2543de7cee371702862f96a143d5206c9fd76aa87e355d9c03ad5c2a560733
+ size 2928654
slidespeech/slidespeech_L95_5slides/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3743038e77227a6a43bd98d14a83e334245094692f9cb7696c7def7a3c28002
+ size 5615310
slidespeech/slidespeech_L95_5slides/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d9434e2c49116ef76628ae73697b90ef922c45eef44f79cb82162930609ec55
+ size 708440282
slidespeech/slidespeech_L95_5slides_filter/S95.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bff6bb7a060dcd16af3a574ecbe4814f685efec45c6ef2620acc4da2212fb038
+ size 145470666
slidespeech/slidespeech_L95_5slides_filter/dev.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c970c517d93cb9922fd6ad117128d43367af2a46f4d05c9da9851717bb72bf7
+ size 2676197
slidespeech/slidespeech_L95_5slides_filter/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a77df39f0da60abae746f8dcfb559ffbcb80af4f8638116600420738208398e
+ size 5171425
slidespeech/slidespeech_L95_5slides_filter/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a8428d6b11b40b4f274008cb89129d1bf65e05fd927ad29191e40b17a5302f9
+ size 676787747
slidespeech/slidespeech_L95_5slides_filtered_train/S95.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8b9f6435adc29c1a410d580432b9712309600a343861e79326d069045e584e3
+ size 41677744
slidespeech/slidespeech_L95_5slides_filtered_train/count_keywords_after_filter_251011.py ADDED
@@ -0,0 +1,146 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Count the average number of keywords between <|startofcontext|> and <|endofcontext|> per sample in a JSON file.
+ """
+
+ import json
+ import re
+ from typing import List, Dict, Any
+
+
+ def count_words_in_context(text: str) -> int:
+     """
+     Count the number of keywords between <|startofcontext|> and <|endofcontext|> in a text.
+
+     Args:
+         text: input text
+
+     Returns:
+         number of keywords inside the context span
+     """
+     # Match the content between <|startofcontext|> and <|endofcontext|> with a regex
+     pattern = r'<\|startofcontext\|>(.*?)<\|endofcontext\|>'
+     matches = re.findall(pattern, text, re.DOTALL)
+
+     total_words = 0
+     for match in matches:
+         # Strip leading/trailing whitespace, then split on ", " (keywords are comma-separated)
+         words = match.strip().split(", ")
+
+         words = [] if words == [''] else words
+         print(words)
+         total_words += len(words)
+
+     return total_words
+
+
+ def analyze_json_file(file_path: str) -> Dict[str, Any]:
+     """
+     Analyze the context keyword counts in a JSON file.
+
+     Args:
+         file_path: path to the JSON file
+
+     Returns:
+         dictionary with the statistics
+     """
+     print(f"Reading file: {file_path}")
+
+     with open(file_path, 'r', encoding='utf-8') as f:
+         data = json.load(f)
+
+     print(f"Total samples: {len(data)}")
+
+     word_counts = []
+     samples_with_context = 0
+     samples_without_context = 0
+
+     for i, sample in enumerate(data):
+         if i % 1000 == 0:
+             print(f"Progress: {i}/{len(data)}")
+
+         # Check the content of every message in the sample
+         sample_word_count = 0
+         has_context = False
+
+         for message in sample.get('messages', []):
+             content = message.get('content', '')
+             word_count = count_words_in_context(content)
+             sample_word_count += word_count
+
+             if '<|startofcontext|>' in content and '<|endofcontext|>' in content:
+                 has_context = True
+
+         if has_context:
+             samples_with_context += 1
+             word_counts.append(sample_word_count)
+         else:
+             samples_without_context += 1
+
+     # Compute the statistics
+     if word_counts:
+         avg_words = sum(word_counts) / len(word_counts)
+         max_words = max(word_counts)
+         min_words = min(word_counts)
+
+         # Compute the median
+         sorted_counts = sorted(word_counts)
+         n = len(sorted_counts)
+         median_words = (sorted_counts[n//2 - 1] + sorted_counts[n//2]) / 2 if n % 2 == 0 else sorted_counts[n//2]
+     else:
+         avg_words = max_words = min_words = median_words = 0
+
+     results = {
+         'total_samples': len(data),
+         'samples_with_context': samples_with_context,
+         'samples_without_context': samples_without_context,
+         'average_words_per_sample': avg_words,
+         'max_words_per_sample': max_words,
+         'min_words_per_sample': min_words,
+         'median_words_per_sample': median_words,
+         'word_counts_distribution': word_counts[:10] if len(word_counts) > 10 else word_counts  # keyword counts of the first 10 samples
+     }
+
+     return results
+
+
+ def main():
+     """Entry point."""
+     file_path = "/data8/rym/Projects/ms-swift/data/add_context_token/slidespeech_L95_5slides_filtered_train/test_from_window_size2_no_param.json"
+
+     try:
+         results = analyze_json_file(file_path)
+
+         print("\n" + "="*60)
+         print("Statistics:")
+         print("="*60)
+         print(f"Total samples: {results['total_samples']}")
+         print(f"Samples with context: {results['samples_with_context']}")
+         print(f"Samples without context: {results['samples_without_context']}")
+         print(f"Average keywords per sample in the context span: {results['average_words_per_sample']:.2f}")
+         print(f"Max keywords: {results['max_words_per_sample']}")
+         print(f"Min keywords: {results['min_words_per_sample']}")
+         print(f"Median keywords: {results['median_words_per_sample']:.2f}")
+
+         if results['word_counts_distribution']:
+             print(f"\nKeyword counts of the first 10 samples:")
+             for i, count in enumerate(results['word_counts_distribution']):
+                 print(f"  Sample {i+1}: {count} keywords")
+
+         # # Save the detailed results to a file
+         # output_file = file_path.replace('.json', '_context_word_stats.json')
+         # with open(output_file, 'w', encoding='utf-8') as f:
+         #     json.dump(results, f, ensure_ascii=False, indent=2)
+
+         # print(f"\nDetailed statistics saved to: {output_file}")
+
+     except Exception as e:
+         print(f"Error while processing the file: {e}")
+         return 1
+
+     return 0
+
+
+ if __name__ == "__main__":
+     exit(main())